author    ShihPo Hung <shihpo.hung@sifive.com>  2021-04-21 01:48:02 -0700
committer ShihPo Hung <shihpo.hung@sifive.com>  2021-04-21 01:48:02 -0700
commit    11072a0bdbc079b2bc0c2337413f62665dc31f49 (patch)
tree      ef6fbefd4eea8d6fe99e86fb54fbb3c025e56086
parent    57ca65e21e9a48040e929a506a259ee2e4b42088 (diff)
download  llvm-11072a0bdbc079b2bc0c2337413f62665dc31f49.tar.gz
[RISCV][Clang] Add RVV AMO builtins

Add vamo[swap/add/xor/and/or/min/max/minu/maxu] builtins.

Reviewed By: khchen

Differential Revision: https://reviews.llvm.org/D100448
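For context (not part of the patch text): a minimal usage sketch of one of the new builtins, assuming a Clang built with the experimental V and Zvamo target features enabled, as in the RUN lines of the tests below. The function and buffer names here are made up for illustration.

```c
#include <riscv_vector.h>
#include <stdint.h>

// Sketch: for each i in [0, vl), atomically perform
//   old = a[bindex[i]]; a[bindex[i]] = old + value[i];
// and return the vector of old values. This mirrors the overloaded form
// exercised by test_vamoaddei8_v_i32m1 in the tests below.
vint32m1_t scatter_add(int32_t *a, vuint8mf4_t bindex, vint32m1_t value,
                       size_t vl) {
  return vamoaddei8(a, bindex, value, vl);
}
```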
-rw-r--r--  clang/include/clang/Basic/riscv_vector.td                      |    44
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c   |  2247
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c   |  2247
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c   |  2247
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c   |  2247
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c    |  2247
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c  |  3683
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c   |  2247
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c              |  2247
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c              |  2247
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c              |  2247
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c              |  2247
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c               |  2247
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c             |  3369
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c              |  2247
-rw-r--r--  clang/utils/TableGen/RISCVVEmitter.cpp                         |    19
16 files changed, 34074 insertions(+), 5 deletions(-)
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 2d94f111d8e3..6132dc956d7b 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -206,6 +206,9 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
// an automatic definition in header is emitted.
string HeaderCode = "";
+ // Sub-extension of the vector spec. Currently only Zvamo and Zvlsseg are supported.
+ string RequiredExtension = "";
+
}
//===----------------------------------------------------------------------===//
@@ -754,6 +757,34 @@ multiclass RVVIndexedStore<string op> {
}
}
+multiclass RVVAMOBuiltinSet<bit has_signed = false, bit has_unsigned = false,
+ bit has_fp = false> {
+ defvar type_list = !if(has_fp, ["i","l","f","d"], ["i","l"]);
+ foreach type = type_list in
+ foreach eew_list = EEWList in {
+ defvar eew = eew_list[0];
+ defvar eew_index = eew_list[1];
+ let Name = NAME # "ei" # eew # "_" # "v",
+ IRName = NAME,
+ IRNameMask = NAME # "_mask",
+ HasMaskedOffOperand = false,
+ ManualCodegen = [{
+ // base, bindex, value, vl
+ IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+ Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+ }],
+ ManualCodegenMask = [{
+ IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[4]->getType()};
+ Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+ }] in {
+ if has_signed then
+ def : RVVBuiltin<"v", "vPe" # eew_index # "Uvv", type>;
+ if !and(!not(IsFloat<type>.val), has_unsigned) then
+ def : RVVBuiltin<"Uv", "UvPUe" # eew_index # "UvUv", type>;
+ }
+ }
+}
+
// 6. Configuration-Setting Instructions
// 6.1. vsetvli/vsetvl instructions
let HasVL = false,
@@ -864,6 +895,19 @@ defm vle16ff: RVVVLEFFBuiltin<["s"]>;
defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
+// 8. Vector AMO Operations
+let RequiredExtension = "Zvamo" in {
+defm vamoswap : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true, /* hasFP */ true>;
+defm vamoadd : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
+defm vamoxor : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
+defm vamoand : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
+defm vamoor : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
+defm vamomin : RVVAMOBuiltinSet< /* hasSigned */ true>;
+defm vamomax : RVVAMOBuiltinSet< /* hasSigned */ true>;
+defm vamominu : RVVAMOBuiltinSet< /* hasSigned */ false, /* hasUnsigned */ true>;
+defm vamomaxu : RVVAMOBuiltinSet< /* hasSigned */ false, /* hasUnsigned */ true>;
+}
+
// 12. Vector Integer Arithmetic Instructions
// 12.1. Vector Single-Width Integer Add and Subtract
defm vadd : RVVIntBinBuiltinSet;
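
A note on what the Zvamo block above expands to (an inference from the multiclass and the tests that follow, not wording from the patch): each defm yields builtins named vamo<op>ei<eew>_v_<type> for index EEWs 8/16/32/64 across the supported element type/LMUL combinations (e.g. vamoaddei8_v_i32m1), plus overloaded spellings and masked variants routed to the *_mask IR intrinsics. A hedged C sketch of the explicit and masked spellings; the masked signature is an assumption based on IRNameMask = NAME # "_mask" and HasMaskedOffOperand = false (mask first, no maskedoff operand):

```c
#include <riscv_vector.h>
#include <stdint.h>

// Explicit (non-overloaded) spelling: the name encodes the index EEW (ei8)
// and the value type/LMUL (i32m1), matching test_vamoaddei8_v_i32m1 below.
vint32m1_t explicit_form(int32_t *base, vuint8mf4_t bindex, vint32m1_t value,
                         size_t vl) {
  return vamoaddei8_v_i32m1(base, bindex, value, vl);
}

// Masked variant (assumption: usual RVV _m convention; vbool32_t because
// SEW/LMUL = 32/1 for an i32m1 operation). Inactive elements leave memory
// untouched.
vint32m1_t masked_form(vbool32_t mask, int32_t *base, vuint8mf4_t bindex,
                       vint32m1_t value, size_t vl) {
  return vamoaddei8_v_i32m1_m(mask, base, bindex, value, vl);
}
```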
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
new file mode 100644
index 000000000000..05539a500443
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
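+// Note: with 64-bit indices over 32-bit data, the index vector occupies twice
+// the LMUL of the data vector, so the ei64 group below necessarily stops at
+// u32m4 (a u32m8 variant would need an unsupported LMUL=16 index group).
+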
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei64(base, bindex, value, vl);
+}
+
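+// The masked overloads below take a leading vbool mask whose ratio matches
+// SEW/LMUL of the data type (e.g. vbool64_t for i32mf2) and lower to the
+// @llvm.riscv.vamoadd.mask.* intrinsics, which take the mask operand between
+// the value and vl arguments.
+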
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
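+// The i64-element masked tests repeat the same pattern; the index type again
+// scales with EEW/SEW, so e.g. vuint8mf8_t indices pair with an m1 data group
+// at ei8.
+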
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei64(mask, base, bindex, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c
new file mode 100644
index 000000000000..d6266b43a69e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
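(Note that the ei64/i32 group above stops at i32m4. The index operand's effective LMUL is the data LMUL scaled by the width ratio, EMUL = LMUL x (index EEW / data SEW); a hypothetical test_vamoandei64_v_i32m8 would need 8 x (64/32) = 16, past the architectural maximum of 8, which is why bindex already reaches vuint64m8_t at data LMUL 4 and the generator emits no m8 case.)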
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei64(base, bindex, value, vl);
+}
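+
+// The masked (_m) variants below take the mask as the first C argument. The
+// overloaded call resolves to the @llvm.riscv.vamoand.mask.* intrinsic,
+// which carries the <vscale x N x i1> mask operand after the value operand,
+// as the CHECK lines show.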
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
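+
+// With a 64-bit index and 32-bit data, the index register group is twice the
+// size of the data group, so the ei64/i32 tests stop at m4: an i32m8 variant
+// would need a vuint64m16_t index, and no such type exists (LMUL tops out
+// at 8).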
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
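+
+// The masked unsigned tests below mirror the signed ones above; signed and
+// unsigned element types lower to the same integer IR types, so the CHECK
+// lines differ only in the function labels.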
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei64(mask, base, bindex, value, vl);
+}
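+// Note that the masked vamoand overloads above take the mask as their first
+// argument and, unlike most masked RVV intrinsics, carry no maskedoff operand;
+// masked-off elements perform no memory access.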
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c
new file mode 100644
index 000000000000..2b75cd3bf30c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
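+// A sketch of the pattern exercised below (hypothetical usage, not part of the
+// autogenerated checks): the overloaded builtin vamomaxei<EEW>() takes
+// (base, bindex, value, vl), where <EEW> is the element width of the unsigned
+// byte-offset vector bindex, whose LMUL is scaled so its SEW/LMUL ratio matches
+// the data vector's (e.g. i32m2 data pairs with a vuint16m1_t ei16 index). Per
+// the draft Zvamo spec, each active element applies an atomic signed max to the
+// word at base plus byte offset bindex[i], and the old memory values are
+// returned:
+//
+//   vint32m1_t old = vamomaxei32(counters, offsets, vals, vl);
+//
+// where counters, offsets, and vals are assumed caller-provided.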
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei64(mask, base, bindex, value, vl);
+}
+
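+// Illustrative usage sketch (not a generated check; assumes riscv_vector.h
+// with the v0.10-era Zvamo intrinsics exercised above). The overloaded form
+// resolves on its operand types: an indexed atomic signed-max of value[i]
+// into base[bindex[i]], performed only for lanes where the mask is set.
+// The helper name below is hypothetical.
+static inline vint32m1_t amo_max_active_lanes(vbool32_t mask, int32_t *base,
+                                              vuint32m1_t bindex,
+                                              vint32m1_t value, size_t vl) {
+  // Inactive lanes leave memory untouched; the result carries the prior
+  // memory contents for the updated lanes.
+  return vamomaxei32(mask, base, bindex, value, vl);
+}
+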
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei64(mask, base, bindex, value, vl);
+}
+
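+// Illustrative usage sketch (not a generated check): the index EEW is
+// independent of the data SEW, so 64-bit unsigned data can be indexed with
+// 8-bit offsets, as in the tests below. With u64m1 data, the ei8 index
+// operand is vuint8mf8_t and the mask is vbool64_t. The helper name below
+// is hypothetical.
+static inline vuint64m1_t amo_maxu_narrow_index(vbool64_t mask,
+                                                uint64_t *base,
+                                                vuint8mf8_t bindex,
+                                                vuint64m1_t value, size_t vl) {
+  return vamomaxuei8(mask, base, bindex, value, vl);
+}
+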
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei64(mask, base, bindex, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
new file mode 100644
index 000000000000..0a0c8c90f53b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei64(base, bindex, value, vl);
+}
+
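+// There is no ei64 form for u32m8: the index vector would need LMUL 16
+// (a vuint64m16_t), which exceeds the maximum LMUL of 8, so u32m4 above is
+// the widest 32-bit-element case with 64-bit indices.
+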
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei64(base, bindex, value, vl);
+}
+
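+// The masked (_m) variants below take the mask as the first argument; its
+// vbool ratio equals SEW/LMUL (e.g. vbool64_t for i32mf2, since 32 / 0.5 =
+// 64), and codegen selects the .mask intrinsic with the mask operand
+// inserted between the value and vl, as the CHECK lines show.
+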
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei64(mask, base, bindex, value, vl);
+}
+
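+// The remaining tests in this file exercise vamominu, the unsigned
+// counterpart of vamomin: each call atomically takes the unsigned minimum
+// of VALUE and the memory word selected by BASE and BINDEX, stores the
+// result back, and returns the prior memory contents.
+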
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei64(mask, base, bindex, value, vl);
+}
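+
+// Note on the masked (_m) variants above: the mask is the leading argument,
+// only active elements perform the atomic minimum, and memory addressed by
+// inactive elements is left untouched.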
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
new file mode 100644
index 000000000000..1bf6d8b78cd5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
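+// A minimal usage sketch of the overloaded builtins tested below
+// (illustrative only; `n`, `offsets`, `flags`, and `bits` are hypothetical
+// names, and the element types must match the builtin's signature):
+//
+//   size_t vl = vsetvl_e32m1(n);                  // choose a vector length
+//   vuint8mf4_t idx = vle8_v_u8mf4(offsets, vl);  // load the index vector
+//   vint32m1_t old = vamoorei8(flags, idx, bits, vl);  // atomic OR; returns old values
+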
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
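+// The i64-element variants below follow the same pattern with EEW=64 data.
+// The index operand keeps its own EEW, so e.g. vamoorei8 pairs vuint8mf8_t
+// indices with vint64m1_t data, as the signatures below show.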
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
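+// Unsigned-element variants: the same overloaded names resolve on the
+// vuint*_t/uint*_t argument types and lower to the identical
+// llvm.riscv.vamoor.* intrinsics, as the checks below show.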
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei8(base, bindex, value, vl);
+}
+
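+// Note: the index vector's EEW is fixed by the builtin suffix
+// (ei8/ei16/ei32/ei64), so the bindex register group scales relative to the
+// data type; as the tests below show, ei16 over u64m1 data takes a
+// vuint16mf4_t bindex (index EMUL = 16/64 x m1 = mf4).
+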
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei64(base, bindex, value, vl);
+}
+
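+// Masked variants: the overloaded vamoorei* form takes the mask as its first
+// argument and, as the CHECK lines show, lowers to the corresponding
+// @llvm.riscv.vamoor.mask intrinsic with the mask operand placed between the
+// value and vl operands.
+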
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei64(mask, base, bindex, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
new file mode 100644
index 000000000000..40c98ac2aa6d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
@@ -0,0 +1,3683 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -target-feature +experimental-zfh -target-feature +experimental-zvamo -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -target-feature +experimental-zfh -target-feature +experimental-zvamo -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei8_v_i32m1(int32_t *base, vuint8mf4_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei8_v_i32m2(int32_t *base, vuint8mf2_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei8_v_i32m4(int32_t *base, vuint8m1_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei8_v_i32m8(int32_t *base, vuint8m2_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei16_v_i32m1(int32_t *base, vuint16mf2_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei16_v_i32m2(int32_t *base, vuint16m1_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei16_v_i32m4(int32_t *base, vuint16m2_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei16_v_i32m8(int32_t *base, vuint16m4_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei32_v_i32m1(int32_t *base, vuint32m1_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei32_v_i32m2(int32_t *base, vuint32m2_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei32_v_i32m4(int32_t *base, vuint32m4_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei32_v_i32m8(int32_t *base, vuint32m8_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei64_v_i32m1(int32_t *base, vuint64m2_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei64_v_i32m2(int32_t *base, vuint64m4_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei64_v_i32m4(int32_t *base, vuint64m8_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei8_v_i64m1(int64_t *base, vuint8mf8_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei8_v_i64m2(int64_t *base, vuint8mf4_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei8_v_i64m4(int64_t *base, vuint8mf2_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei8_v_i64m8(int64_t *base, vuint8m1_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei16_v_i64m1(int64_t *base, vuint16mf4_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei16_v_i64m2(int64_t *base, vuint16mf2_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei16_v_i64m4(int64_t *base, vuint16m1_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei16_v_i64m8(int64_t *base, vuint16m2_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei32_v_i64m1(int64_t *base, vuint32mf2_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei32_v_i64m2(int64_t *base, vuint32m1_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei32_v_i64m4(int64_t *base, vuint32m2_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei32_v_i64m8(int64_t *base, vuint32m4_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei64_v_i64m1(int64_t *base, vuint64m1_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei64_v_i64m2(int64_t *base, vuint64m2_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei64_v_i64m4(int64_t *base, vuint64m4_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei64_v_i64m8(int64_t *base, vuint64m8_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei8_v_u32m4(uint32_t *base, vuint8m1_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei8_v_u32m8(uint32_t *base, vuint8m2_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei16_v_u32m2(uint32_t *base, vuint16m1_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei16_v_u32m4(uint32_t *base, vuint16m2_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei16_v_u32m8(uint32_t *base, vuint16m4_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei32_v_u32m1(uint32_t *base, vuint32m1_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei32_v_u32m2(uint32_t *base, vuint32m2_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei32_v_u32m4(uint32_t *base, vuint32m4_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei32_v_u32m8(uint32_t *base, vuint32m8_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei64_v_u32m1(uint32_t *base, vuint64m2_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei64_v_u32m2(uint32_t *base, vuint64m4_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei64_v_u32m4(uint32_t *base, vuint64m8_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei8_v_u64m8(uint64_t *base, vuint8m1_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei16_v_u64m4(uint64_t *base, vuint16m1_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei16_v_u64m8(uint64_t *base, vuint16m2_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei32_v_u64m2(uint64_t *base, vuint32m1_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei32_v_u64m4(uint64_t *base, vuint32m2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei32_v_u64m8(uint64_t *base, vuint32m4_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei64_v_u64m1(uint64_t *base, vuint64m1_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei64_v_u64m2(uint64_t *base, vuint64m2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei64_v_u64m4(uint64_t *base, vuint64m4_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei8_v_f32mf2(float *base, vuint8mf8_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei8_v_f32m1(float *base, vuint8mf4_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei8_v_f32m2(float *base, vuint8mf2_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei8_v_f32m4(float *base, vuint8m1_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei8_v_f32m8(float *base, vuint8m2_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei16_v_f32mf2(float *base, vuint16mf4_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei16_v_f32m1(float *base, vuint16mf2_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei16_v_f32m2(float *base, vuint16m1_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei16_v_f32m4(float *base, vuint16m2_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei16_v_f32m8(float *base, vuint16m4_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei32_v_f32mf2(float *base, vuint32mf2_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei32_v_f32m1(float *base, vuint32m1_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei32_v_f32m2(float *base, vuint32m2_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei32_v_f32m4(float *base, vuint32m4_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei32_v_f32m8(float *base, vuint32m8_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei64_v_f32mf2(float *base, vuint64m1_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei64_v_f32m1(float *base, vuint64m2_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei64_v_f32m2(float *base, vuint64m4_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei64_v_f32m4(float *base, vuint64m8_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
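+// Note that the ei64 tests for f32 stop at m4: an indexed AMO's index operand
+// occupies LMUL * (index EEW / SEW) registers, so f32m4 already needs a
+// vuint64m8_t index (4 * 64/32 = 8), and an f32m8 variant would require an
+// impossible m16 index group. The f64 tests follow.
+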
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei8_v_f64m1(double *base, vuint8mf8_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei8_v_f64m2(double *base, vuint8mf4_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei8_v_f64m4(double *base, vuint8mf2_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei8_v_f64m8(double *base, vuint8m1_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vamoswapei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei16_v_f64m1(double *base, vuint16mf4_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei16_v_f64m2(double *base, vuint16mf2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei16_v_f64m4(double *base, vuint16m1_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei16_v_f64m8(double *base, vuint16m2_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vamoswapei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei32_v_f64m1(double *base, vuint32mf2_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei32_v_f64m2(double *base, vuint32m1_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei32_v_f64m4(double *base, vuint32m2_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei32_v_f64m8(double *base, vuint32m4_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vamoswapei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei64_v_f64m1(double *base, vuint64m1_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei64_v_f64m2(double *base, vuint64m2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei64_v_f64m4(double *base, vuint64m4_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei64_v_f64m8(double *base, vuint64m8_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vamoswapei64(base, bindex, value, vl);
+}
+
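+// From here on, the masked (_m) variants: the mask becomes the first argument
+// and the call lowers to the corresponding llvm.riscv.vamoswap.mask
+// intrinsic. A minimal sketch, assuming the same test environment and reusing
+// the argument types of test_vamoswapei16_v_i32m1_m below (names are
+// illustrative):
+static vint32m1_t swap_under_mask(vbool32_t m, int32_t *table,
+                                  vuint16mf2_t idx, vint32m1_t vals,
+                                  size_t n) {
+  // Lanes with m clear perform no memory access; active lanes return the
+  // prior memory contents.
+  return vamoswapei16(m, table, idx, vals, n);
+}
+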
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei8_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint8mf8_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei8_v_i32m1_m(vbool32_t mask, int32_t *base,
+ vuint8mf4_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei8_v_i32m2_m(vbool16_t mask, int32_t *base,
+ vuint8mf2_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei8_v_i32m4_m(vbool8_t mask, int32_t *base,
+ vuint8m1_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei8_v_i32m8_m(vbool4_t mask, int32_t *base,
+ vuint8m2_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei16_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint16mf4_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei16_v_i32m1_m(vbool32_t mask, int32_t *base,
+ vuint16mf2_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei16_v_i32m2_m(vbool16_t mask, int32_t *base,
+ vuint16m1_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei16_v_i32m4_m(vbool8_t mask, int32_t *base,
+ vuint16m2_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei16_v_i32m8_m(vbool4_t mask, int32_t *base,
+ vuint16m4_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint32mf2_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei32_v_i32m1_m(vbool32_t mask, int32_t *base,
+ vuint32m1_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei32_v_i32m2_m(vbool16_t mask, int32_t *base,
+ vuint32m2_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei32_v_i32m4_m(vbool8_t mask, int32_t *base,
+ vuint32m4_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei32_v_i32m8_m(vbool4_t mask, int32_t *base,
+ vuint32m8_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei64_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint64m1_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei64_v_i32m1_m(vbool32_t mask, int32_t *base,
+ vuint64m2_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei64_v_i32m2_m(vbool16_t mask, int32_t *base,
+ vuint64m4_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei64_v_i32m4_m(vbool8_t mask, int32_t *base,
+ vuint64m8_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
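+// As in the unmasked case, masked ei64 coverage for i32 stops at m4 for the
+// same index-LMUL reason; the masked i64 tests follow.
+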
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei8_v_i64m1_m(vbool64_t mask, int64_t *base,
+ vuint8mf8_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei8_v_i64m2_m(vbool32_t mask, int64_t *base,
+ vuint8mf4_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei8_v_i64m4_m(vbool16_t mask, int64_t *base,
+ vuint8mf2_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei8_v_i64m8_m(vbool8_t mask, int64_t *base,
+ vuint8m1_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei16_v_i64m1_m(vbool64_t mask, int64_t *base,
+ vuint16mf4_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei16_v_i64m2_m(vbool32_t mask, int64_t *base,
+ vuint16mf2_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei16_v_i64m4_m(vbool16_t mask, int64_t *base,
+ vuint16m1_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei16_v_i64m8_m(vbool8_t mask, int64_t *base,
+ vuint16m2_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei32_v_i64m1_m(vbool64_t mask, int64_t *base,
+ vuint32mf2_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei32_v_i64m2_m(vbool32_t mask, int64_t *base,
+ vuint32m1_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei32_v_i64m4_m(vbool16_t mask, int64_t *base,
+ vuint32m2_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei32_v_i64m8_m(vbool8_t mask, int64_t *base,
+ vuint32m4_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei64_v_i64m1_m(vbool64_t mask, int64_t *base,
+ vuint64m1_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei64_v_i64m2_m(vbool32_t mask, int64_t *base,
+ vuint64m2_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei64_v_i64m4_m(vbool16_t mask, int64_t *base,
+ vuint64m4_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei64_v_i64m8_m(vbool8_t mask, int64_t *base,
+ vuint64m8_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
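+// The unsigned-element tests below mirror the signed ones above; only the
+// data element type and the matching overload change. As a minimal,
+// hypothetical usage sketch (assuming a Zvamo-enabled target and the usual
+// <riscv_vector.h> header), every masked overloaded form is called the same
+// way:
+//
+//   // Swap value[i] into base[bindex[i]] for each active lane i < vl and
+//   // return the prior memory contents in the result vector.
+//   vuint32m1_t old = vamoswapei8(mask, base, bindex, value, vl);
+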
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei8_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint8mf8_t bindex, vuint32mf2_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei8_v_u32m1_m(vbool32_t mask, uint32_t *base,
+ vuint8mf4_t bindex, vuint32m1_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei8_v_u32m2_m(vbool16_t mask, uint32_t *base,
+ vuint8mf2_t bindex, vuint32m2_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei8_v_u32m4_m(vbool8_t mask, uint32_t *base,
+ vuint8m1_t bindex, vuint32m4_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei8_v_u32m8_m(vbool4_t mask, uint32_t *base,
+ vuint8m2_t bindex, vuint32m8_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint16mf4_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
+ vuint16mf2_t bindex, vuint32m1_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei16_v_u32m2_m(vbool16_t mask, uint32_t *base,
+ vuint16m1_t bindex, vuint32m2_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei16_v_u32m4_m(vbool8_t mask, uint32_t *base,
+ vuint16m2_t bindex, vuint32m4_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei16_v_u32m8_m(vbool4_t mask, uint32_t *base,
+ vuint16m4_t bindex, vuint32m8_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint32mf2_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei32_v_u32m1_m(vbool32_t mask, uint32_t *base,
+ vuint32m1_t bindex, vuint32m1_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei32_v_u32m2_m(vbool16_t mask, uint32_t *base,
+ vuint32m2_t bindex, vuint32m2_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei32_v_u32m4_m(vbool8_t mask, uint32_t *base,
+ vuint32m4_t bindex, vuint32m4_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei32_v_u32m8_m(vbool4_t mask, uint32_t *base,
+ vuint32m8_t bindex, vuint32m8_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint64m1_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei64_v_u32m1_m(vbool32_t mask, uint32_t *base,
+ vuint64m2_t bindex, vuint32m1_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei64_v_u32m2_m(vbool16_t mask, uint32_t *base,
+ vuint64m4_t bindex, vuint32m2_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei64_v_u32m4_m(vbool8_t mask, uint32_t *base,
+ vuint64m8_t bindex, vuint32m4_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei8_v_u64m1_m(vbool64_t mask, uint64_t *base,
+ vuint8mf8_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei8_v_u64m2_m(vbool32_t mask, uint64_t *base,
+ vuint8mf4_t bindex, vuint64m2_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei8_v_u64m4_m(vbool16_t mask, uint64_t *base,
+ vuint8mf2_t bindex, vuint64m4_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei8_v_u64m8_m(vbool8_t mask, uint64_t *base,
+ vuint8m1_t bindex, vuint64m8_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
+ vuint16mf4_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
+ vuint16mf2_t bindex, vuint64m2_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei16_v_u64m4_m(vbool16_t mask, uint64_t *base,
+ vuint16m1_t bindex, vuint64m4_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei16_v_u64m8_m(vbool8_t mask, uint64_t *base,
+ vuint16m2_t bindex, vuint64m8_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
+ vuint32mf2_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei32_v_u64m2_m(vbool32_t mask, uint64_t *base,
+ vuint32m1_t bindex, vuint64m2_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei32_v_u64m4_m(vbool16_t mask, uint64_t *base,
+ vuint32m2_t bindex, vuint64m4_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei32_v_u64m8_m(vbool8_t mask, uint64_t *base,
+ vuint32m4_t bindex, vuint64m8_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei64_v_u64m1_m(vbool64_t mask, uint64_t *base,
+ vuint64m1_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei64_v_u64m2_m(vbool32_t mask, uint64_t *base,
+ vuint64m2_t bindex, vuint64m2_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei64_v_u64m4_m(vbool16_t mask, uint64_t *base,
+ vuint64m4_t bindex, vuint64m4_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei64_v_u64m8_m(vbool8_t mask, uint64_t *base,
+ vuint64m8_t bindex, vuint64m8_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
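+// The remaining masked tests cover the floating-point overloads. Among the
+// RVV AMO builtins only vamoswap carries f32/f64 variants, since a swap
+// performs no arithmetic on the data it exchanges; the index vectors stay
+// unsigned integers throughout.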
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei8_v_f32mf2_m(vbool64_t mask, float *base,
+ vuint8mf8_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei8_v_f32m1_m(vbool32_t mask, float *base,
+ vuint8mf4_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei8_v_f32m2_m(vbool16_t mask, float *base,
+ vuint8mf2_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei8_v_f32m4_m(vbool8_t mask, float *base,
+ vuint8m1_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei8_v_f32m8_m(vbool4_t mask, float *base,
+ vuint8m2_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei16_v_f32mf2_m(vbool64_t mask, float *base,
+ vuint16mf4_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei16_v_f32m1_m(vbool32_t mask, float *base,
+ vuint16mf2_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei16_v_f32m2_m(vbool16_t mask, float *base,
+ vuint16m1_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei16_v_f32m4_m(vbool8_t mask, float *base,
+ vuint16m2_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei16_v_f32m8_m(vbool4_t mask, float *base,
+ vuint16m4_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei32_v_f32mf2_m(vbool64_t mask, float *base,
+ vuint32mf2_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei32_v_f32m1_m(vbool32_t mask, float *base,
+ vuint32m1_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei32_v_f32m2_m(vbool16_t mask, float *base,
+ vuint32m2_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei32_v_f32m4_m(vbool8_t mask, float *base,
+ vuint32m4_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei32_v_f32m8_m(vbool4_t mask, float *base,
+ vuint32m8_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei64_v_f32mf2_m(vbool64_t mask, float *base,
+ vuint64m1_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei64_v_f32m1_m(vbool32_t mask, float *base,
+ vuint64m2_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei64_v_f32m2_m(vbool16_t mask, float *base,
+ vuint64m4_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei64_v_f32m4_m(vbool8_t mask, float *base,
+ vuint64m8_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei8_v_f64m1_m(vbool64_t mask, double *base,
+ vuint8mf8_t bindex, vfloat64m1_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei8_v_f64m2_m(vbool32_t mask, double *base,
+ vuint8mf4_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei8_v_f64m4_m(vbool16_t mask, double *base,
+ vuint8mf2_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei8_v_f64m8_m(vbool8_t mask, double *base,
+ vuint8m1_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vamoswapei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei16_v_f64m1_m(vbool64_t mask, double *base,
+ vuint16mf4_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei16_v_f64m2_m(vbool32_t mask, double *base,
+ vuint16mf2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei16_v_f64m4_m(vbool16_t mask, double *base,
+ vuint16m1_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei16_v_f64m8_m(vbool8_t mask, double *base,
+ vuint16m2_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vamoswapei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei32_v_f64m1_m(vbool64_t mask, double *base,
+ vuint32mf2_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei32_v_f64m2_m(vbool32_t mask, double *base,
+ vuint32m1_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei32_v_f64m4_m(vbool16_t mask, double *base,
+ vuint32m2_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei32_v_f64m8_m(vbool8_t mask, double *base,
+ vuint32m4_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vamoswapei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei64_v_f64m1_m(vbool64_t mask, double *base,
+ vuint64m1_t bindex, vfloat64m1_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei64_v_f64m2_m(vbool32_t mask, double *base,
+ vuint64m2_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei64_v_f64m4_m(vbool16_t mask, double *base,
+ vuint64m4_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei64_v_f64m8_m(vbool8_t mask, double *base,
+ vuint64m8_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vamoswapei64(mask, base, bindex, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
new file mode 100644
index 000000000000..ff5b26736862
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
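+// Semantics sketch for the vamoxor tests below (assuming Zvamo semantics;
+// the addr(i) notation is illustrative): for each element i < vl, the
+// intrinsic atomically XORs value[i] into the memory word addressed by base
+// plus the byte offset bindex[i], and returns the original memory contents:
+//   old = *addr(i);  *addr(i) = old ^ value[i];  result[i] = old;
+// The overloaded spelling used at the call sites resolves on the argument
+// types, so the _v_<type> suffix is dropped while the ei<N> index-width
+// suffix remains in the name.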
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
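+// From here the tests step the index EEW (ei16, ei32, ei64) while the data
+// type stays i32: the ei<N> suffix names the width of the bindex elements
+// only, which is why the same vint32* value/result types recur with
+// progressively wider vuint<N>* index vectors.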
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
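+// i64-element tests follow. With SEW=64 the smallest data LMUL is m1 (no
+// fractional i64 types), so the ei8 index operands start at vuint8mf8_t
+// (index EMUL = LMUL * EEW/SEW = 1 * 8/64 = 1/8).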
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
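+// Unsigned-element tests follow. The overloaded vamoxorei* calls resolve on
+// the vuint operand types and, as the CHECK lines show, lower to the same
+// @llvm.riscv.vamoxor.* intrinsics as the signed cases.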
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei64(base, bindex, value, vl);
+}
+
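+// The tests below exercise the masked (_m) overloads: the vbool mask becomes
+// the first C argument, and codegen selects the @llvm.riscv.vamoxor.mask.*
+// intrinsics, which take the <vscale x N x i1> mask operand just before vl.
+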
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
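+// Masked tests for 64-bit elements follow; the base pointer is bitcast to the
+// matching <vscale x N x i64>* and index EEWs of 8 through 64 are covered.
+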
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
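+// The unsigned variants below lower to the same llvm.riscv.vamoxor.mask.*
+// intrinsics as the signed ones: XOR is sign-agnostic, so signedness only
+// changes the C-level vector and scalar types.
+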
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
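+
+// Note: there is intentionally no ei64 variant for u32m8. The index vector
+// would need LMUL 16 (m8 data scaled by EEW 64 / SEW 32), which exceeds the
+// largest RVV register grouping, so coverage continues with 64-bit elements.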
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei64(mask, base, bindex, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c
new file mode 100644
index 000000000000..baebacaaa236
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
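+
+// Each test below pairs one data type/LMUL with one index EEW. The intrinsic
+// vamoaddei<eew>_v_<type>(base, bindex, value, vl) atomically adds value[i]
+// into base[bindex[i]] for i < vl and returns the original memory values. The
+// index operand's LMUL is (data LMUL) * eew / SEW; for example, vint32m2_t
+// data with ei8 indices takes a vuint8mf2_t bindex.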
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei8_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei8_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei8_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei8_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei8_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei16_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei16_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei16_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei16_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei16_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei32_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei32_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei32_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei32_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei32_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei64_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei64_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei64_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei64_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei8_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei8_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei8_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei8_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei16_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei16_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei16_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei16_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei32_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei32_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei32_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei32_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei64_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei64_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei64_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei64_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei8_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei8_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei8_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei8_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei8_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei16_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei16_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei16_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei16_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei16_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei32_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei32_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei32_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei32_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei32_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei64_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei64_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei64_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei64_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei8_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei8_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei8_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei8_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei16_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei16_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei16_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei16_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei32_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei32_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei32_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei32_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei64_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei64_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei64_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei64_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei8_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei8_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei8_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei8_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei8_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei16_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei16_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei16_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei16_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei16_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei32_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei32_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei32_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei32_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoaddei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoaddei32_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoaddei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoaddei64_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoaddei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoaddei64_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoaddei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoaddei64_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoaddei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoaddei64_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
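+// Note: no ei64 variant is generated for i32m8. A 64-bit index operand for a
+// 32-bit value at LMUL 8 would require an index register group of LMUL 16,
+// beyond the architectural maximum of m8, so the ei64/i32 tests stop at m4.
+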
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei8_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei8_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei8_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei8_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei16_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei16_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei16_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei16_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei32_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei32_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei32_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei32_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoaddei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoaddei64_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoaddei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoaddei64_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoaddei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoaddei64_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoaddei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoaddei64_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
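+// The unsigned tests below mirror the signed ones: vamoadd carries no
+// signedness distinction at the IR level, so the u32/u64 builtins lower to
+// the same @llvm.riscv.vamoadd.mask intrinsics as their i32/i64 counterparts
+// above, differing only in the C-level pointer and vector element types.
+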
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei8_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei8_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei8_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei8_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei8_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei16_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei16_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei16_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei16_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei16_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei32_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei32_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei32_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei32_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoaddei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoaddei32_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoaddei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoaddei64_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoaddei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoaddei64_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoaddei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoaddei64_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoaddei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoaddei64_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei8_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei8_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei8_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei8_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei16_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei16_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei16_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei16_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei32_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei32_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei32_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei32_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoaddei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoaddei64_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoaddei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoaddei64_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoaddei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoaddei64_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoaddei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoaddei64_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c
new file mode 100644
index 000000000000..f838cec66435
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
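+
+// Orientation comment (hand-written; all CHECK lines in this file are
+// autogenerated): each test exercises one vamoandei<EEW>_v_<type><LMUL>
+// intrinsic, which atomically ANDs value[i] into the memory locations formed
+// from base plus the offsets in bindex, for the first vl elements, and, per
+// the draft Zvamo semantics, returns the values previously held in memory.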
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei8_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei8_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei8_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei8_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei8_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei16_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei16_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei16_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei16_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei16_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei32_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei32_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei32_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei32_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei32_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei64_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei64_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei64_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei64_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei8_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei8_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei8_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei8_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei16_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei16_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei16_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei16_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei32_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei32_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei32_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei32_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei64_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei64_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei64_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei64_v_i64m8(base, bindex, value, vl);
+}
+
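+// The unsigned (u32/u64) tests below lower to the same llvm.riscv.vamoand
+// intrinsics as the signed tests above, since bitwise AND is sign-agnostic;
+// only the C-level argument and return types differ.
+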
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei8_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei8_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei8_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei8_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei8_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei16_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei16_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei16_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei16_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei16_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei32_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei32_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei32_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei32_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei32_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei64_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei64_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei64_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei64_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei8_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei8_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei8_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei8_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei16_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei16_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei16_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei16_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei32_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei32_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei32_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei32_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei64_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei64_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei64_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei64_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei8_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei8_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei8_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei8_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei8_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei16_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei16_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei16_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei16_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei16_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei32_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei32_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei32_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei32_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoandei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoandei32_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoandei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoandei64_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoandei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoandei64_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoandei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoandei64_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoandei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoandei64_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei8_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei8_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei8_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei8_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei16_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei16_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei16_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei16_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei32_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei32_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei32_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei32_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoandei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoandei64_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoandei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoandei64_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoandei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoandei64_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoandei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoandei64_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei8_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei8_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei8_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei8_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei8_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei16_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei16_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei16_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei16_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei16_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei32_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei32_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei32_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei32_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoandei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoandei32_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoandei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoandei64_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoandei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoandei64_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoandei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoandei64_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoandei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoandei64_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei8_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei8_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei8_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei8_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei16_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei16_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei16_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei16_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei32_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei32_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei32_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei32_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoandei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoandei64_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoandei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoandei64_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoandei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoandei64_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoandei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoandei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoandei64_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
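+// A note on the masked (_m) variants exercised above: every overload shares
+// the shape (mask, base, bindex, value, vl), and for each active lane it
+// returns the value read from memory before the atomic AND. A minimal usage
+// sketch, with hypothetical names (shared, idx, upd) and assuming a
+// Zvamo-enabled target:
+//
+//   vbool64_t m = ...;                       // active lanes only
+//   vuint64m1_t idx = ...;                   // per-element offsets
+//   vuint64m1_t upd = ...;                   // bits to AND into memory
+//   vuint64m1_t old = vamoandei64_v_u64m1_m(m, shared, idx, upd, vl);
+//   // old[i] holds the prior memory contents for each active lane i.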
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c
new file mode 100644
index 000000000000..3c2f0c82532c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
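+// The tests below mirror the vamoand.c pattern for vamomax (signed maximum):
+// each unmasked builtin takes (base, bindex, value, vl), and the expected IR
+// after mem2reg is a single @llvm.riscv.vamomax.<data>.<index>.<xlen> call.
+// An illustrative sketch only, not part of the checked output:
+//
+//   vint32m1_t old = vamomaxei8_v_i32m1(base, bindex, value, vl);
+//   // old holds the pre-update memory values; memory receives
+//   // max(old, value) element-wise at the indexed addresses.
+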
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei8_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei8_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei8_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei8_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei8_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei16_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei16_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei16_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei16_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei16_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei32_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei32_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei32_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei32_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei32_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei64_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei64_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei64_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei64_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei8_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei8_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei8_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei8_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei16_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei16_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei16_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei16_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei32_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei32_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei32_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei32_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei64_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei64_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei64_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei64_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei8_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei8_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei8_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei8_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei8_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei16_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei16_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei16_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei16_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei16_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei32_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei32_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei32_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei32_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei32_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei64_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei64_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei64_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei64_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei8_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei8_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei8_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei8_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei16_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei16_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei16_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei16_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei32_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei32_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei32_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei32_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei64_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei64_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei64_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei64_v_u64m8(base, bindex, value, vl);
+}
+
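+// The remaining tests exercise the masked (_m) builtins. Relative to the
+// unmasked calls above, the mask is passed as a leading vboolN_t argument and
+// the call lowers to the ".mask." intrinsic, which carries one extra
+// <vscale x N x i1> operand; the AMO builtins take no maskedoff operand. N in
+// vboolN_t is SEW/LMUL of the data type, e.g. i32mf2 -> vbool64_t and
+// i64m8 -> vbool8_t.
+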
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei8_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei8_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei8_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei8_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei8_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei16_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei16_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei16_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei16_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei16_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei32_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei32_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei32_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei32_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamomaxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamomaxei32_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamomaxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamomaxei64_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamomaxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamomaxei64_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamomaxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamomaxei64_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamomaxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamomaxei64_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
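+// There is no ei64 test for i32m8: the index vector would need LMUL
+// 8 * 64/32 = 16, which exceeds the architectural maximum of 8, so that
+// builtin/type combination does not exist.
+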
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei8_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei8_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei8_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei8_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
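+// For reference, the effect of a masked vamomax can be modeled in scalar C.
+// This is an illustrative sketch under the byte-offset indexing rule; the
+// helper name and the handling of inactive result elements are assumptions,
+// not the intrinsic contract:
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+static inline void ref_vamomaxei8_i64_m(int64_t *base, const uint8_t *bindex,
+                                        int64_t *value, const bool *mask,
+                                        size_t vl) {
+  for (size_t i = 0; i < vl; ++i) {
+    if (!mask[i])
+      continue;                                         // inactive: memory untouched
+    int64_t *p = (int64_t *)((char *)base + bindex[i]); // index is a byte offset
+    int64_t old = *p;
+    *p = old > value[i] ? old : value[i];               // signed max (atomic in HW)
+    value[i] = old;                                     // result is the old value
+  }
+}
+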
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei16_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei16_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei16_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei16_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei32_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei32_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei32_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei32_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamomaxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamomaxei64_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamomaxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamomaxei64_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamomaxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamomaxei64_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamomaxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamomaxei64_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
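+// From here on, the unsigned variants: vamomaxu differs from vamomax only in
+// performing an unsigned maximum, so the builtins take vuintN_t data types
+// and lower to the llvm.riscv.vamomaxu.mask.* intrinsics; the operand layout
+// is otherwise identical to the signed tests above.
+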
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei8_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei8_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei8_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei8_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
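+// A hypothetical call site for the builtin exercised just above (not a CHECK
+// test; the function name is illustrative), assuming the caller already holds
+// a mask, byte offsets, and an operand vector for vl elements:
+vuint32m4_t update_max_m (vbool8_t m, uint32_t *tbl, vuint8m1_t off,
+                          vuint32m4_t v, size_t vl) {
+  // Returns the pre-update table contents for the active elements.
+  return vamomaxuei8_v_u32m4_m(m, tbl, off, v, vl);
+}
+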
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei8_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei16_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei16_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei16_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei16_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei16_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei32_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei32_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei32_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei32_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamomaxuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamomaxuei32_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamomaxuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamomaxuei64_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamomaxuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamomaxuei64_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamomaxuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamomaxuei64_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamomaxuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamomaxuei64_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei8_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei8_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei8_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei8_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei16_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei16_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei16_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei16_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei32_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei32_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei32_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei32_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamomaxuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamomaxuei64_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamomaxuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamomaxuei64_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamomaxuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamomaxuei64_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamomaxuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamomaxuei64_v_u64m8_m(mask, base, bindex, value, vl);
+}
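+
+// Note on the masked forms above (informal commentary, not checked by
+// FileCheck): each *_m builtin takes a leading vbool mask whose ratio
+// matches SEW/LMUL of the data type (e.g. vbool8_t for u64m8) and lowers
+// to the corresponding @llvm.riscv.vamomaxu.mask.* intrinsic. A sketch of
+// the assumed per-element behaviour under the draft Zvamo extension:
+//
+//   for (size_t i = 0; i < vl; ++i)
+//     if (mask[i])
+//       // atomic unsigned-max at the address formed from base and
+//       // bindex[i]; element i of the result receives the old value
+//       result[i] = atomic-umax(address(base, bindex[i]), value[i]);
+//   // inactive elements perform no memory operation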
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c
new file mode 100644
index 000000000000..215740e504b9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
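+// Every unmasked test below has the same shape: Clang bitcasts the scalar
+// base pointer to a scalable-vector pointer and emits a call to the
+// @llvm.riscv.vamomin.* intrinsic with the index vector, the value vector,
+// and vl; the returned vector holds the prior memory contents. Informal
+// sketch of one call (hypothetical operand values; assumes the draft
+// Zvamo extension is enabled, as in the RUN lines above):
+//
+//   vint32m1_t old = vamominei32_v_i32m1(base, bindex, value, vl);
+//   // for each i < vl: old[i] = memory at (base, bindex[i]);
+//   //                  that location = signed min(old[i], value[i])
+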
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei8_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei8_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei8_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei8_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei8_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei16_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei16_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei16_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei16_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei16_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei32_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei32_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei32_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei32_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei32_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei64_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei64_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei64_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei64_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei8_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei8_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei8_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei8_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei16_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei16_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei16_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei16_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei32_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei32_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei32_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei32_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei64_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei64_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei64_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei64_v_i64m8(base, bindex, value, vl);
+}
+
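+// A minimal usage sketch (illustration only, not a generated test). Every
+// vamomin builtin above takes (base, bindex, value, vl): for i < vl it
+// atomically computes min(base[bindex[i]], value[i]) at each indexed
+// address and returns the original memory values, lowering to a bitcast of
+// `base` plus a call to @llvm.riscv.vamomin.<data>.<index>.<xlen>, as the
+// CHECK lines show. The `table` array and wrapper below are hypothetical:
+//
+//   #include <riscv_vector.h>  // assumes the Zvamo builtins are exposed
+//   int64_t table[64];
+//   vint64m1_t amo_min_scatter(vuint16mf4_t bindex, vint64m1_t value,
+//                              size_t vl) {
+//     // old values at table[bindex[i]] come back in the result vector
+//     return vamominei16_v_i64m1(table, bindex, value, vl);
+//   }
+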
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei8_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei8_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei8_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei8_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei8_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei16_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei16_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei16_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei16_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei16_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei32_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei32_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei32_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei32_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei32_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei64_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei64_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei64_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei64_v_u32m4(base, bindex, value, vl);
+}
+
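+// Note: with a 64-bit index EEW over 32-bit data, the index operand
+// occupies twice the register group of the data operand, so a u32m8 form
+// (which would need an index group of LMUL=16) has no valid encoding; the
+// ei64-indexed u32 tests therefore end at u32m4 above.
+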
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei8_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei8_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei8_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei8_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei16_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei16_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei16_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei16_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei32_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei32_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei32_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei32_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei64_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei64_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei64_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei64_v_u64m8(base, bindex, value, vl);
+}
+
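+// Note: the masked variants below follow the unmasked pattern with an `_m`
+// suffix and a leading vbool mask whose ratio matches SEW/LMUL (e.g.
+// vbool64_t for i32mf2); codegen calls the corresponding
+// @llvm.riscv.vamomin.mask intrinsic, which carries the mask as an extra
+// operand before vl.
+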
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei8_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei8_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei8_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei8_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei8_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei16_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei16_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei16_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei16_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei16_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei32_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei32_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei32_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei32_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamominei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamominei32_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamominei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamominei64_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamominei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamominei64_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamominei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamominei64_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamominei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamominei64_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei8_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei8_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei8_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei8_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei16_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei16_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei16_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei16_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei32_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei32_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei32_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei32_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamominei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamominei64_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamominei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamominei64_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamominei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamominei64_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamominei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamominei64_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei8_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei8_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei8_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei8_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei8_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei16_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei16_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei16_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei16_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei16_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei32_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei32_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei32_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei32_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamominuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamominuei32_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamominuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamominuei64_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamominuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamominuei64_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamominuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamominuei64_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamominuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamominuei64_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei8_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei8_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei8_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei8_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei16_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei16_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei16_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei16_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei32_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei32_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei32_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei32_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamominuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamominuei64_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamominuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamominuei64_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamominuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamominuei64_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamominuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamominuei64_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
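For orientation before the next file: each masked test above wraps a single builtin whose mask, index, and data types must agree (the mask is vboolN_t with N = SEW/LMUL, and the index vector's LMUL is scaled so its element count matches the data vector's — hence vbool16_t and vuint8mf2_t alongside vuint64m4_t). A minimal caller, assuming the same experimental V and Zvamo features as the RUN lines and an illustrative wrapper name, might look like:

#include <riscv_vector.h>

// Sketch only: for each active lane i, atomically take the unsigned minimum
// of value[i] and the u64 element at byte offset bindex[i] from base, and
// return the values previously held in memory; lanes with a clear mask bit
// perform no memory access.
vuint64m1_t amomin_masked(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
                          vuint64m1_t value, size_t vl) {
  return vamominuei8_v_u64m1_m(mask, base, bindex, value, vl);
}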
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c
new file mode 100644
index 000000000000..a08c641a90db
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
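+
+// Each test below wraps a single vamoor builtin so update_cc_test_checks.py
+// can pin the expected IR: the index EEW (ei8/ei16/ei32/ei64) is crossed
+// with every legal LMUL for 32- and 64-bit signed and unsigned data.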
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei8_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei8_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei8_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei8_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei8_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei16_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei16_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei16_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei16_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei16_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei32_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei32_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei32_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei32_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei32_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei64_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei64_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei64_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei64_v_i32m4(base, bindex, value, vl);
+}
+
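+// Note: there is no ei64 case for i32m8, since the 64-bit index vector would
+// need LMUL 16 (8 * 64/32), exceeding the maximum register grouping of 8.
+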
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei8_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei8_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei8_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei8_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei16_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei16_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei16_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei16_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei32_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei32_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei32_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei32_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei64_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei64_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei64_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei64_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei8_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei8_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei8_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei8_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei8_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei16_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei16_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei16_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei16_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei16_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei32_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei32_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei32_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei32_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei32_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei64_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei64_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei64_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei64_v_u32m4(base, bindex, value, vl);
+}
+
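+// The ei64 group for 32-bit data stops at u32m4: a 64-bit index vector
+// matching SEW=32 data at LMUL=8 would need index EMUL=16, which the V
+// extension does not define. The same constraint trims the masked ei64
+// groups further below.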
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei8_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei8_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei8_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei8_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei16_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei16_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei16_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei16_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei32_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei32_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei32_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei32_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei64_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei64_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei64_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei64_v_u64m8(base, bindex, value, vl);
+}
+
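+// The _m variants below take the mask as the first argument and lower to the
+// ".mask" flavor of the intrinsic, which carries an extra <vscale x N x i1>
+// mask operand ahead of the vl.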
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei8_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei8_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei8_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei8_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei8_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei16_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei16_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei16_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei16_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei16_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei32_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei32_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei32_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei32_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoorei32_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoorei64_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoorei64_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoorei64_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoorei64_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei8_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei8_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei8_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei8_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei16_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei16_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei16_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei16_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei32_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei32_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei32_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei32_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoorei64_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoorei64_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoorei64_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoorei64_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei8_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei8_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei8_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei8_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei8_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei16_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei16_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei16_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei16_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei16_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei32_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei32_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei32_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei32_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoorei32_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoorei64_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoorei64_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoorei64_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoorei64_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei8_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei8_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei8_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei8_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei16_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei16_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei16_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei16_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei32_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei32_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei32_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei32_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoorei64_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoorei64_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoorei64_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoorei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoorei64_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
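+// Editorial note (a sketch, not autogenerated by update_cc_test_checks.py):
+// every masked test above lowers to the same two-step shape shown in its
+// CHECK lines, spelled out here for vamoorei16_v_i64m1_m, with iXLEN standing
+// in for i32 (RV32) or i64 (RV64):
+//
+//   %p   = bitcast i64* %base to <vscale x 1 x i64>*   ; scalar base -> vector ptr
+//   %old = call <vscale x 1 x i64>
+//       @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.iXLEN(
+//           <vscale x 1 x i64>* %p, <vscale x 1 x i16> %bindex,
+//           <vscale x 1 x i64> %value, <vscale x 1 x i1> %mask, iXLEN %vl)
+//   ret <vscale x 1 x i64> %old                        ; prior memory contents
+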
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c
new file mode 100644
index 000000000000..a114988bebfd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c
@@ -0,0 +1,3369 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -target-feature +experimental-zfh -target-feature +experimental-zvamo -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -target-feature +experimental-zfh -target-feature +experimental-zvamo -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
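+// Editorial usage sketch (not autogenerated): each unmasked vamoswap builtin
+// takes (base, bindex, value, vl), atomically exchanges value[i] with the
+// memory addressed by base plus the i-th element of bindex for i in [0, vl),
+// and returns the old memory values. The helper below is hypothetical; it
+// assumes the byte-offset indexing of the draft Zvamo extension, and being an
+// unused static inline it emits no IR, so the autogenerated checks are
+// unaffected.
+static inline vint32m1_t exchange_slots(int32_t *table, vuint8mf4_t byte_offsets,
+                                        vint32m1_t new_vals, size_t vl) {
+  // Returns the values previously stored at the indexed slots of table.
+  return vamoswapei8_v_i32m1(table, byte_offsets, new_vals, vl);
+}
+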
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoswapei8_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoswapei8_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoswapei8_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoswapei8_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoswapei8_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoswapei16_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoswapei16_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoswapei16_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoswapei16_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoswapei16_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoswapei32_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoswapei32_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoswapei32_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoswapei32_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoswapei32_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoswapei64_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoswapei64_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoswapei64_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoswapei64_v_i32m4(base, bindex, value, vl);
+}
+
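+// There is deliberately no test_vamoswapei64_v_i32m8: with SEW=32 data at m8
+// and EEW=64 indices, the index operand would need LMUL = 8 * 64/32 = 16,
+// which exceeds the maximum register grouping of m8, so no such builtin is
+// generated.
+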
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoswapei8_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoswapei8_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoswapei8_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoswapei8_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoswapei16_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoswapei16_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoswapei16_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoswapei16_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoswapei32_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoswapei32_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoswapei32_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoswapei32_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoswapei64_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoswapei64_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoswapei64_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoswapei64_v_i64m8(base, bindex, value, vl);
+}
+
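+// A minimal usage sketch, not part of the generated checks: it reuses the
+// non-overloaded intrinsics exercised in this file, plus vid_v_u32m1 and
+// vsll_vx_u32m1 from the same intrinsic set (an assumption), and the helper
+// name swap_prefix is hypothetical. It atomically swaps the first vl
+// elements of a[] with the lanes of v, building the byte offsets
+// 0, 4, 8, ... with a vid/vsll pair.
+vint32m1_t swap_prefix (int32_t *a, vint32m1_t v, size_t vl) {
+  vuint32m1_t idx = vid_v_u32m1(vl);             // lane indices 0, 1, 2, ...
+  vuint32m1_t boff = vsll_vx_u32m1(idx, 2, vl);  // byte offsets i * 4 for int32_t
+  return vamoswapei32_v_i32m1(a, boff, v, vl);   // returns the prior memory contents
+}
+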
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoswapei8_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoswapei8_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoswapei8_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoswapei8_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoswapei8_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoswapei16_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoswapei16_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoswapei16_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoswapei16_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoswapei16_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoswapei32_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoswapei32_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoswapei32_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoswapei32_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoswapei32_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoswapei64_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoswapei64_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoswapei64_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoswapei64_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoswapei8_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoswapei8_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoswapei8_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoswapei8_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoswapei16_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoswapei16_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoswapei16_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoswapei16_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoswapei32_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoswapei32_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoswapei32_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoswapei32_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoswapei64_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoswapei64_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoswapei64_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoswapei64_v_u64m8(base, bindex, value, vl);
+}
+
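+// The remaining unmasked tests cover floating-point element types. As with
+// the integer forms above, bindex is an unsigned index vector whose LMUL is
+// the data LMUL scaled by (index EEW / element SEW); e.g. ei8 with f32mf2
+// takes a vuint8mf8_t index, while ei16 with f32m8 takes vuint16m4_t.
+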
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
+ return vamoswapei8_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
+ return vamoswapei8_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
+ return vamoswapei8_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
+ return vamoswapei8_v_f32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei8_v_f32m8 (float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
+ return vamoswapei8_v_f32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
+ return vamoswapei16_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
+ return vamoswapei16_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
+ return vamoswapei16_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
+ return vamoswapei16_v_f32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei16_v_f32m8 (float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
+ return vamoswapei16_v_f32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
+ return vamoswapei32_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
+ return vamoswapei32_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
+ return vamoswapei32_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
+ return vamoswapei32_v_f32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei32_v_f32m8 (float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
+ return vamoswapei32_v_f32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
+ return vamoswapei64_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
+ return vamoswapei64_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
+ return vamoswapei64_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
+ return vamoswapei64_v_f32m4(base, bindex, value, vl);
+}
+
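+// The same scaling holds for 64-bit elements (ei8 with f64m1 takes
+// vuint8mf8_t). Combinations whose index vector would need LMUL greater
+// than 8, such as ei64 with f32m8, are not emitted, so no such test appears.
+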
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
+ return vamoswapei8_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
+ return vamoswapei8_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
+ return vamoswapei8_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei8_v_f64m8 (double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
+ return vamoswapei8_v_f64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
+ return vamoswapei16_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
+ return vamoswapei16_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
+ return vamoswapei16_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei16_v_f64m8 (double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
+ return vamoswapei16_v_f64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
+ return vamoswapei32_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
+ return vamoswapei32_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
+ return vamoswapei32_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei32_v_f64m8 (double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
+ return vamoswapei32_v_f64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
+ return vamoswapei64_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
+ return vamoswapei64_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
+ return vamoswapei64_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei64_v_f64m8 (double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
+ return vamoswapei64_v_f64m8(base, bindex, value, vl);
+}
+
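+// The _m tests below exercise the masked builtins: the mask is the leading
+// vboolN_t argument, where N is the SEW/LMUL ratio (vbool64_t for i32mf2,
+// vbool4_t for i32m8), and codegen targets the .mask flavor of the IR
+// intrinsic with an extra <vscale x N x i1> operand before vl.
+// A minimal, hypothetical use, assuming the Zvamo extension is enabled:
+//   vbool32_t m = ...;
+//   vint32m1_t old = vamoswapei32_v_i32m1_m(m, base, bindex, value, vl);
+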
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoswapei8_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoswapei8_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoswapei8_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoswapei8_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoswapei8_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoswapei16_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoswapei16_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoswapei16_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoswapei16_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoswapei16_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
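The parameter types above track the data type mechanically: the mask is vboolN_t with N = SEW/LMUL (i32mf2 gives 32/(1/2) = 64, hence vbool64_t), and the index vector uses the EEW named in the builtin at whatever LMUL keeps its element count equal to the data vector's. The ei16/i32 group just shown instantiates the whole table; as a comment-only summary:

// Type pairings exercised by the ei16/i32 masked tests above:
//   data vint32mf2_t : mask vbool64_t, index vuint16mf4_t  (nxv1)
//   data vint32m1_t  : mask vbool32_t, index vuint16mf2_t  (nxv2)
//   data vint32m2_t  : mask vbool16_t, index vuint16m1_t   (nxv4)
//   data vint32m4_t  : mask vbool8_t,  index vuint16m2_t   (nxv8)
//   data vint32m8_t  : mask vbool4_t,  index vuint16m4_t   (nxv16)
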
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoswapei32_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoswapei32_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoswapei32_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoswapei32_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoswapei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoswapei32_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoswapei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoswapei64_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoswapei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoswapei64_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoswapei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoswapei64_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoswapei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoswapei64_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoswapei8_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoswapei8_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoswapei8_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoswapei8_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoswapei16_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoswapei16_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoswapei16_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoswapei16_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoswapei32_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoswapei32_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoswapei32_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoswapei32_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoswapei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoswapei64_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoswapei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoswapei64_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoswapei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoswapei64_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoswapei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoswapei64_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
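The unsigned tests from here on lower to the same llvm.riscv.vamoswap.mask intrinsics as the signed ones above; the nxvNi32/nxvNi64 suffixes encode only element width, so signedness lives purely in the C-level overloads. For example (variable names hypothetical):

// Both calls emit @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.<xlen>;
// only the C types of the base, value, and result differ.
vint32mf2_t  s = vamoswapei16_v_i32mf2_m(mask, sbase, bindex, svalue, vl);
vuint32mf2_t u = vamoswapei16_v_u32mf2_m(mask, ubase, bindex, uvalue, vl);
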
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoswapei8_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoswapei8_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoswapei8_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoswapei8_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoswapei8_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoswapei16_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoswapei16_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoswapei16_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoswapei16_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoswapei16_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoswapei32_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoswapei32_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoswapei32_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoswapei32_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoswapei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoswapei32_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoswapei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoswapei64_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoswapei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoswapei64_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoswapei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoswapei64_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoswapei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoswapei64_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoswapei8_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoswapei8_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoswapei8_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoswapei8_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoswapei16_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoswapei16_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoswapei16_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoswapei16_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoswapei32_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoswapei32_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoswapei32_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoswapei32_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoswapei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoswapei64_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoswapei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoswapei64_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoswapei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoswapei64_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoswapei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoswapei64_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
+ return vamoswapei8_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
+ return vamoswapei8_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
+ return vamoswapei8_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
+ return vamoswapei8_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei8_v_f32m8_m (vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
+ return vamoswapei8_v_f32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
+ return vamoswapei16_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
+ return vamoswapei16_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
+ return vamoswapei16_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
+ return vamoswapei16_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei16_v_f32m8_m (vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
+ return vamoswapei16_v_f32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
+ return vamoswapei32_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
+ return vamoswapei32_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
+ return vamoswapei32_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
+ return vamoswapei32_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vamoswapei32_v_f32m8_m (vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
+ return vamoswapei32_v_f32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vamoswapei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
+ return vamoswapei64_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vamoswapei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
+ return vamoswapei64_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vamoswapei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
+ return vamoswapei64_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vamoswapei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
+ return vamoswapei64_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
+ return vamoswapei8_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
+ return vamoswapei8_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
+ return vamoswapei8_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei8_v_f64m8_m (vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
+ return vamoswapei8_v_f64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
+ return vamoswapei16_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
+ return vamoswapei16_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
+ return vamoswapei16_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei16_v_f64m8_m (vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
+ return vamoswapei16_v_f64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
+ return vamoswapei32_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
+ return vamoswapei32_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
+ return vamoswapei32_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei32_v_f64m8_m (vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
+ return vamoswapei32_v_f64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vamoswapei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
+ return vamoswapei64_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vamoswapei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
+ return vamoswapei64_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vamoswapei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
+ return vamoswapei64_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vamoswapei64_v_f64m8_m (vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
+ return vamoswapei64_v_f64m8_m(mask, base, bindex, value, vl);
+}
+
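+// A minimal usage sketch (an illustration, not an autogenerated check;
+// the helper name is ours): the masked vamoswap form is assumed to
+// atomically exchange value[i] with the double at byte offset bindex[i]
+// from base, but only for active elements where mask[i] is set, leaving
+// memory untouched for masked-off elements.
+static inline vfloat64m8_t swap_active_f64 (vbool8_t mask, double *base,
+                                            vuint64m8_t bindex,
+                                            vfloat64m8_t value, size_t vl) {
+  return vamoswapei64_v_f64m8_m(mask, base, bindex, value, vl);
+}
+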
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c
new file mode 100644
index 000000000000..eeabe6b07321
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c
@@ -0,0 +1,2247 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
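+// A minimal usage sketch (an illustration, not an autogenerated check;
+// the helper name is ours): each vamoxor intrinsic below is assumed to
+// atomically XOR value[i] into the 32-bit element at byte offset
+// bindex[i] from base, for i < vl, returning the prior memory contents
+// element-wise.
+static inline vint32m1_t xor_scatter_i32m1 (int32_t *base, vuint8mf4_t bindex,
+                                            vint32m1_t value, size_t vl) {
+  return vamoxorei8_v_i32m1(base, bindex, value, vl);
+}
+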
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei8_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei8_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei8_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei8_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei8_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei16_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei16_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei16_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei16_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei16_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei32_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei32_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei32_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei32_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei32_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei64_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei64_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei64_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei64_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei8_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei8_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei8_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei8_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei16_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei16_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei16_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei16_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei32_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei32_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei32_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei32_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei64_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei64_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei64_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei64_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei8_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei8_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei8_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei8_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei8_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei16_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei16_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei16_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei16_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei16_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei32_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei32_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei32_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei32_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei32_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei64_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei64_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei64_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei64_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei8_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei8_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei8_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei8_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei16_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei16_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei16_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei16_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei32_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei32_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei32_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei32_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei64_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei64_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei64_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei64_v_u64m8(base, bindex, value, vl);
+}
+
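For orientation before the masked (`_m`) cases below: the unmasked vamoxorei* builtins exercised above behave like an indexed atomic read-xor-write, with the old memory contents returned in the destination. A minimal scalar sketch of that behavior — assuming `bindex` holds byte offsets as with RVV indexed addressing, that each per-element update is atomic, and with illustrative names that are not part of this patch — looks like:

#include <stddef.h>
#include <stdint.h>

// Scalar model of the shape of vamoxorei64_v_u64m1(base, bindex, value, vl);
// an assumption-laden sketch, not the compiler's actual lowering.
static void vamoxor_u64_model(uint64_t *base, const uint64_t *bindex,
                              uint64_t *value, size_t vl) {
  for (size_t i = 0; i < vl; ++i) {
    uint64_t *addr = (uint64_t *)((char *)base + bindex[i]);
    uint64_t old = *addr;    // each element's read-modify-write is atomic
    *addr = old ^ value[i];  // XOR the element into memory
    value[i] = old;          // the builtin returns the old memory values
  }
}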
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei8_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei8_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei8_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei8_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei8_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei16_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei16_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei16_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei16_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei16_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei32_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei32_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei32_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei32_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vamoxorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
+ return vamoxorei32_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vamoxorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
+ return vamoxorei64_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vamoxorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
+ return vamoxorei64_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vamoxorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
+ return vamoxorei64_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vamoxorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ return vamoxorei64_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei8_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei8_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei8_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei8_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei16_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei16_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei16_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei16_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei32_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei32_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei32_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei32_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vamoxorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
+ return vamoxorei64_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vamoxorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
+ return vamoxorei64_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vamoxorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
+ return vamoxorei64_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vamoxorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
+ return vamoxorei64_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
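The unsigned masked cases that follow mirror the signed pattern above: the mask is passed first, and inactive lanes perform no memory access. A hypothetical usage sketch for one of the masked builtins — assuming <riscv_vector.h> is included and using illustrative names not taken from this patch — is:

// Hypothetical wrapper: XOR `bits` into the flag words selected by `idx`,
// but only for lanes enabled in `active`; returns the prior flag values.
vint64m1_t xor_flags_masked(vbool64_t active, int64_t *flags,
                            vuint8mf8_t idx, vint64m1_t bits, size_t vl) {
  return vamoxorei8_v_i64m1_m(active, flags, idx, bits, vl);
}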
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei8_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei8_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei8_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei8_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei8_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei16_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei16_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei16_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei16_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei16_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei32_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei32_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei32_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei32_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vamoxorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
+ return vamoxorei32_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vamoxorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
+ return vamoxorei64_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vamoxorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
+ return vamoxorei64_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vamoxorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
+ return vamoxorei64_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vamoxorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
+ return vamoxorei64_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei8_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei8_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei8_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei8_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei16_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei16_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei16_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei16_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei32_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei32_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei32_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei32_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vamoxorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
+ return vamoxorei64_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vamoxorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
+ return vamoxorei64_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vamoxorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
+ return vamoxorei64_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vamoxorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
+ return vamoxorei64_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
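The generated tests above all follow one shape: bitcast the scalar base pointer to a scalable-vector pointer, then call the masked @llvm.riscv.vamoxor.mask intrinsic with (base, bindex, value, mask, vl). For orientation, here is a minimal, purely illustrative caller of one of the builtins exercised above; the wrapper name and comments are hypothetical, and it assumes a compiler with the experimental V and Zvamo extensions enabled so that riscv_vector.h exposes these builtins:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Atomically XOR each active element of `value` into memory addressed by
// `base` plus the per-element offsets in `bindex`, returning the prior
// memory contents; masked-off lanes are left untouched.
static vuint32m1_t xor_scatter_u32m1(vbool32_t mask, uint32_t *base,
                                     vuint32m1_t bindex, vuint32m1_t value,
                                     size_t vl) {
  return vamoxorei32_v_u32m1_m(mask, base, bindex, value, vl);
}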
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index 594e95434e29..dc8b3594f886 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -136,7 +136,8 @@ enum RISCVExtension : uint8_t {
Basic = 0,
F = 1 << 1,
D = 1 << 2,
- Zfh = 1 << 3
+ Zfh = 1 << 3,
+ Zvamo = 1 << 4,
};
// TODO: refactor the RVVIntrinsic class design after supporting all intrinsics.
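A note on the enum above: RISCVExtension is a bitmask, so a single intrinsic can accumulate several requirements. A rough sketch of the intended composition (the variable names below are illustrative, not from this patch): an AMO builtin over 64-bit float vectors would collect both D and Zvamo, and the emitted guard then has to test both feature macros.

// Illustrative only: how requirement bits compose for a hypothetical
// f64-element Zvamo builtin.
uint8_t Exts = RISCVExtension::Basic;
Exts |= RISCVExtension::D;     // the type walk saw a 64-bit float vector
Exts |= RISCVExtension::Zvamo; // RequiredExtension == "Zvamo"
// emitExtDefStr would then print both tests, e.g.
//   defined(__riscv_d) && defined(__riscv_zvamo)
// (assuming LS joins the terms with " && "; the separator is defined
// outside the hunks shown in this diff).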
@@ -171,7 +172,8 @@ public:
bool HasMaskedOffOperand, bool HasVL, bool HasNoMaskedOverloaded,
bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
const std::vector<int64_t> &IntrinsicTypes,
- const std::vector<int64_t> &PermuteOperands);
+ const std::vector<int64_t> &PermuteOperands,
+ StringRef RequiredExtension);
~RVVIntrinsic() = default;
StringRef getName() const { return Name; }
@@ -749,7 +751,8 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
bool HasNoMaskedOverloaded, bool HasAutoDef,
StringRef ManualCodegen, const RVVTypes &OutInTypes,
const std::vector<int64_t> &NewIntrinsicTypes,
- const std::vector<int64_t> &PermuteOperands)
+ const std::vector<int64_t> &PermuteOperands,
+ StringRef RequiredExtension)
: IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
@@ -775,6 +778,8 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
else if (T->isFloatVector(64))
RISCVExtensions |= RISCVExtension::D;
}
+ if (RequiredExtension == "Zvamo")
+ RISCVExtensions |= RISCVExtension::Zvamo;
// Init OutputType and InputTypes
OutputType = OutInTypes[0];
@@ -1108,6 +1113,7 @@ void RVVEmitter::createRVVIntrinsics(
R->getValueAsListOfInts("IntrinsicTypes");
std::vector<int64_t> PermuteOperands =
R->getValueAsListOfInts("PermuteOperands");
+ StringRef RequiredExtension = R->getValueAsString("RequiredExtension");
StringRef IRName = R->getValueAsString("IRName");
StringRef IRNameMask = R->getValueAsString("IRNameMask");
@@ -1152,7 +1158,7 @@ void RVVEmitter::createRVVIntrinsics(
Name, SuffixStr, MangledName, IRName, HasSideEffects,
/*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(),
- IntrinsicTypes, PermuteOperands));
+ IntrinsicTypes, PermuteOperands, RequiredExtension));
if (HasMask) {
// Create a mask intrinsic
Optional<RVVTypes> MaskTypes =
@@ -1161,7 +1167,8 @@ void RVVEmitter::createRVVIntrinsics(
Name, SuffixStr, MangledName, IRNameMask, HasSideEffects,
/*IsMask=*/true, HasMaskedOffOperand, HasVL,
HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
- MaskTypes.getValue(), IntrinsicTypes, PermuteOperands));
+ MaskTypes.getValue(), IntrinsicTypes, PermuteOperands,
+ RequiredExtension));
}
} // end for Log2LMULList
} // end for TypeRange
@@ -1234,6 +1241,8 @@ bool RVVEmitter::emitExtDefStr(uint8_t Extents, raw_ostream &OS) {
OS << LS << "defined(__riscv_d)";
if (Extents & RISCVExtension::Zfh)
OS << LS << "defined(__riscv_zfh)";
+ if (Extents & RISCVExtension::Zvamo)
+ OS << LS << "defined(__riscv_zvamo)";
OS << "\n";
return true;
}
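End to end, the emitter change means every builtin whose record sets RequiredExtension to "Zvamo" is fenced off in the generated header unless the target advertises that extension. A sketch of the expected generated guard (the exact surrounding output depends on emitExtDefStr's caller, which is outside this diff):

#if defined(__riscv_zvamo)
// ... generated vamo* builtin declarations, e.g. vamoxorei32_v_u32m1_m ...
#endif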