Diffstat (limited to 'gcc/testsuite/gcc.target/riscv')
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-100.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-101.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-102.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-103.c   28
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-104.c   16
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-105.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-106.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-107.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-108.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-109.c   28
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-110.c   16
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-111.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-112.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-113.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-114.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-115.c   16
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-116.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-117.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-118.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-119.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-122.c    4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-97.c    28
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-98.c    16
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/merge_constraint-1.c         8
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-6.c      164
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-7.c      166
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-8.c      124
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-9.c      124
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-2.c                    12
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-3.c                    12
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-4.c                    11
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-5.c                    11
32 files changed, 445 insertions(+), 399 deletions(-)
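
Every hunk below makes the same mechanical change: the RVV fixed-point intrinsics these tests exercise (__riscv_vsadd, __riscv_vsaddu, __riscv_vssub, __riscv_vssubu, __riscv_vaadd, __riscv_vnclipu) gain an explicit rounding-mode operand immediately before the vl argument. The tests pass the literal 0, which in the vxrm encoding of the V specification is round-to-nearest-up (RNU). A minimal sketch of the updated call shape, assuming this revision's riscv_vector.h on an RVV-enabled GCC (function and pointer names here are illustrative, not from the patch):

#include <riscv_vector.h>
#include <stdint.h>

/* Sketch: the rounding-mode operand (literal 0 = RNU, as throughout
   these tests) is inserted just before the vl argument.  */
void saturating_add (int64_t *in, int64_t *out, int64_t x)
{
  vint64m1_t v = __riscv_vle64_v_i64m1 (in, 4);
  /* Previously: __riscv_vsadd_vx_i64m1 (v, x, 4);  */
  vint64m1_t v2 = __riscv_vsadd_vx_i64m1 (v, x, 0, 4);
  __riscv_vse64_v_i64m1 (out, v2, 4);
}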
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-100.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-100.c
index 667a7656ce1..c7267fcbfce 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-100.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-100.c
@@ -6,8 +6,8 @@ void f (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-101.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-101.c
index eefdf455bca..3694a95ed24 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-101.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-101.c
@@ -6,8 +6,8 @@ void f (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, x, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1_tu (v3, v2, x, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, x, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1_tu (v3, v2, x, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-102.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-102.c
index 4b24b971cba..b65f9935692 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-102.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-102.c
@@ -7,8 +7,8 @@ void f (void * in, void *out, int32_t x, int n)
for (int i = 0; i < n; i++) {
vint64m1_t v = __riscv_vle64_v_i64m1 (in + i + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + i + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, x, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1_tu (v3, v2, x, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, x, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1_tu (v3, v2, x, 0,4);
__riscv_vse64_v_i64m1 (out + i + 2, v4, 4);
}
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-103.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-103.c
index 7ffedd5ceb9..fb401bd1978 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-103.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-103.c
@@ -15,8 +15,8 @@ void f0 (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, -16, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, -16, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, -16, 0,4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, -16, 0,4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -32,8 +32,8 @@ void f1 (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 15, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 15, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 15, 0,4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 15, 0,4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -49,8 +49,8 @@ void f2 (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 16, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 16, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 16, 0,4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 16, 0,4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -66,8 +66,8 @@ void f3 (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAAA, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 0xAAAAAAAA, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAAA, 0,4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 0xAAAAAAAA, 0,4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -83,8 +83,8 @@ void f4 (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAAAAAAAAAAA, 0,4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 0xAAAAAAAAAAAAAAAA, 0,4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -100,8 +100,8 @@ void f5 (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAAAAAAAAAAA, 0,4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 0xAAAAAAAAAAAAAAAA, 0,4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -117,7 +117,7 @@ void f6 (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, x, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, x, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, x, 0,4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, x, 0,4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-104.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-104.c
index 612213a6036..408359f29c4 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-104.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-104.c
@@ -15,8 +15,8 @@ void f0 (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, -16, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, -16, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, -16, 0,4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, -16, 0,4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -32,8 +32,8 @@ void f1 (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 15, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 15, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 15, 0,4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 15, 0,4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -49,8 +49,8 @@ void f2 (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 16, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 16, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 16, 0,4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 16, 0,4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -66,7 +66,7 @@ void f3 (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAA, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 0xAAAAAAA, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAA, 0,4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1 (v3, 0xAAAAAAA, 0,4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-105.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-105.c
index 86825c088b1..c6d3981fd83 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-105.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-105.c
@@ -6,8 +6,8 @@ void f (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAAA, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAAA, 0, 4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1_tu (v3, v2, 0xAAAAAAAA, 0, 4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-106.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-106.c
index 94bff68ba5e..963a1579504 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-106.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-106.c
@@ -6,8 +6,8 @@ void f (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, 0xAAAAAAAAAAAAAAAA, 0, 4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 0, 4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-107.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-107.c
index a3d08de06c4..b04e03b5894 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-107.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-107.c
@@ -6,8 +6,8 @@ void f (void * in, void *out, int64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, x, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1_tu (v3, v2, x, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, x, 0, 4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1_tu (v3, v2, x, 0, 4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-108.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-108.c
index 99acc51b4ff..3df45b64480 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-108.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-108.c
@@ -7,8 +7,8 @@ void f (void * in, void *out, int32_t x, int n)
for (int i = 0; i < n; i++) {
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + i + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + i + 2, 4);
- vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, x, 4);
- vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1_tu (v3, v2, x, 4);
+ vuint64m1_t v3 = __riscv_vsaddu_vx_u64m1 (v2, x, 0, 4);
+ vuint64m1_t v4 = __riscv_vsaddu_vx_u64m1_tu (v3, v2, x, 0, 4);
__riscv_vse64_v_u64m1 (out + i + 2, v4, 4);
}
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-109.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-109.c
index 9127b869f53..11b6b1862af 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-109.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-109.c
@@ -15,8 +15,8 @@ void f0 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, -15, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, -15, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, -15, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, -15, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -32,8 +32,8 @@ void f1 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 16, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 16, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 16, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 16, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -49,8 +49,8 @@ void f2 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 17, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 17, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 17, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 17, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -66,8 +66,8 @@ void f3 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAAA, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 0xAAAAAAAA, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAAA, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 0xAAAAAAAA, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -83,8 +83,8 @@ void f4 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -100,8 +100,8 @@ void f5 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -117,7 +117,7 @@ void f6 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, x, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, x, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, x, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, x, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-110.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-110.c
index d70789e1810..11b6f724359 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-110.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-110.c
@@ -15,8 +15,8 @@ void f0 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, -15, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, -15, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, -15, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, -15, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -32,8 +32,8 @@ void f1 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 16, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 16, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 16, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 16, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -49,8 +49,8 @@ void f2 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 17, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 17, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 17, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 17, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -66,7 +66,7 @@ void f3 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAA, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 0xAAAAAAA, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAA, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1 (v3, 0xAAAAAAA, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-111.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-111.c
index e02b21554a8..bd1fd27ad93 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-111.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-111.c
@@ -6,8 +6,8 @@ void f (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAAA, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAAA, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1_tu (v3, v2, 0xAAAAAAAA, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-112.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-112.c
index 8cd9c4d09ba..6696d4c1867 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-112.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-112.c
@@ -6,8 +6,8 @@ void f (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-113.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-113.c
index 6090a1da69d..80bba2ae796 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-113.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-113.c
@@ -6,8 +6,8 @@ void f (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, x, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1_tu (v3, v2, x, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, x, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1_tu (v3, v2, x, 0, 4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-114.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-114.c
index d595c446503..ecbb3e3d386 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-114.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-114.c
@@ -7,8 +7,8 @@ void f (void * in, void *out, int32_t x, int n)
for (int i = 0; i < n; i++) {
vint64m1_t v = __riscv_vle64_v_i64m1 (in + i + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + i + 2, 4);
- vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, x, 4);
- vint64m1_t v4 = __riscv_vssub_vx_i64m1_tu (v3, v2, x, 4);
+ vint64m1_t v3 = __riscv_vssub_vx_i64m1 (v2, x, 0, 4);
+ vint64m1_t v4 = __riscv_vssub_vx_i64m1_tu (v3, v2, x, 0, 4);
__riscv_vse64_v_i64m1 (out + i + 2, v4, 4);
}
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-115.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-115.c
index 9722f5e6118..d7ec2688e64 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-115.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-115.c
@@ -15,8 +15,8 @@ void f0 (void * in, void *out, uint64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, -16, 4);
- vuint64m1_t v4 = __riscv_vssubu_vx_u64m1 (v3, -16, 4);
+ vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, -16, 0, 4);
+ vuint64m1_t v4 = __riscv_vssubu_vx_u64m1 (v3, -16, 0, 4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -32,8 +32,8 @@ void f1 (void * in, void *out, uint64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, 15, 4);
- vuint64m1_t v4 = __riscv_vssubu_vx_u64m1 (v3, 15, 4);
+ vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, 15, 0, 4);
+ vuint64m1_t v4 = __riscv_vssubu_vx_u64m1 (v3, 15, 0, 4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -49,8 +49,8 @@ void f2 (void * in, void *out, uint64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, 16, 4);
- vuint64m1_t v4 = __riscv_vssubu_vx_u64m1 (v3, 16, 4);
+ vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, 16, 0, 4);
+ vuint64m1_t v4 = __riscv_vssubu_vx_u64m1 (v3, 16, 0, 4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
@@ -66,7 +66,7 @@ void f3 (void * in, void *out, uint64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, 0xAAAAAAA, 4);
- vuint64m1_t v4 = __riscv_vssubu_vx_u64m1 (v3, 0xAAAAAAA, 4);
+ vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, 0xAAAAAAA, 0, 4);
+ vuint64m1_t v4 = __riscv_vssubu_vx_u64m1 (v3, 0xAAAAAAA, 0, 4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-116.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-116.c
index 066365dc744..ae6a40540e6 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-116.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-116.c
@@ -6,8 +6,8 @@ void f (void * in, void *out, uint64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, 0xAAAAAAAA, 4);
- vuint64m1_t v4 = __riscv_vssubu_vx_u64m1_tu (v3, v2, 0xAAAAAAAA, 4);
+ vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, 0xAAAAAAAA, 0, 4);
+ vuint64m1_t v4 = __riscv_vssubu_vx_u64m1_tu (v3, v2, 0xAAAAAAAA, 0, 4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-117.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-117.c
index bfc6773f198..60461f86a28 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-117.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-117.c
@@ -6,8 +6,8 @@ void f (void * in, void *out, uint64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
- vuint64m1_t v4 = __riscv_vssubu_vx_u64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 4);
+ vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, 0xAAAAAAAAAAAAAAAA, 0, 4);
+ vuint64m1_t v4 = __riscv_vssubu_vx_u64m1_tu (v3, v2, 0xAAAAAAAAAAAAAAAA, 0, 4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-118.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-118.c
index 05a7a1d9e65..088d797dc9f 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-118.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-118.c
@@ -6,8 +6,8 @@ void f (void * in, void *out, uint64_t x, int n)
{
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + 2, 4);
- vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, x, 4);
- vuint64m1_t v4 = __riscv_vssubu_vx_u64m1_tu (v3, v2, x, 4);
+ vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, x, 0, 4);
+ vuint64m1_t v4 = __riscv_vssubu_vx_u64m1_tu (v3, v2, x, 0, 4);
__riscv_vse64_v_u64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-119.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-119.c
index 0b51175f66c..7d00b8499ef 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-119.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-119.c
@@ -7,8 +7,8 @@ void f (void * in, void *out, uint64_t x, int n)
for (int i = 0; i < n; i++) {
vuint64m1_t v = __riscv_vle64_v_u64m1 (in + i + 1, 4);
vuint64m1_t v2 = __riscv_vle64_v_u64m1_tu (v, in + i + 2, 4);
- vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, x, 4);
- vuint64m1_t v4 = __riscv_vssubu_vx_u64m1_tu (v3, v2, x, 4);
+ vuint64m1_t v3 = __riscv_vssubu_vx_u64m1 (v2, x, 0, 4);
+ vuint64m1_t v4 = __riscv_vssubu_vx_u64m1_tu (v3, v2, x, 0, 4);
__riscv_vse64_v_u64m1 (out + i + 2, v4, 4);
}
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-122.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-122.c
index c5fd4701dec..495f8cad3b8 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-122.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-122.c
@@ -6,7 +6,7 @@ void f1 (void * in, void *out, int32_t x)
{
vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
- vint32m1_t v3 = __riscv_vaadd_vx_i32m1 (v2, 0, 4);
+ vint32m1_t v3 = __riscv_vaadd_vx_i32m1 (v2, 0, 0, 4);
__riscv_vse32_v_i32m1 (out, v3, 4);
}
@@ -14,7 +14,7 @@ void f2 (void * in, void *out, int32_t x)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in, 4);
- vint64m1_t v3 = __riscv_vaadd_vx_i64m1 (v2, 0, 4);
+ vint64m1_t v3 = __riscv_vaadd_vx_i64m1 (v2, 0, 0, 4);
__riscv_vse64_v_i64m1 (out, v3, 4);
}
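
The _tu hunks above also show where the new operand sits relative to the extra maskedoff operand: the rounding mode always lands immediately before vl. A short sketch under the same assumptions (names illustrative):

#include <riscv_vector.h>
#include <stdint.h>

/* Operand order in the plain and tail-undisturbed forms used above:
   the _tu form takes the maskedoff vector first; the rounding mode
   (0) stays immediately before vl in both.  */
void f (int64_t *in, int64_t *out, int64_t x)
{
  vint64m1_t v  = __riscv_vle64_v_i64m1 (in, 4);
  vint64m1_t v2 = __riscv_vsadd_vx_i64m1 (v, x, 0, 4);        /* (vs2, rs1, vxrm, vl) */
  vint64m1_t v3 = __riscv_vsadd_vx_i64m1_tu (v2, v, x, 0, 4); /* (maskedoff, vs2, rs1, vxrm, vl) */
  __riscv_vse64_v_i64m1 (out, v3, 4);
}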
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-97.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-97.c
index d1283d89a93..c13ed674c67 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-97.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-97.c
@@ -15,8 +15,8 @@ void f0 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, -16, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, -16, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, -16, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, -16, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -32,8 +32,8 @@ void f1 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 15, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 15, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 15, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 15, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -49,8 +49,8 @@ void f2 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 16, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 16, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 16, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 16, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -66,8 +66,8 @@ void f3 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 0xAAAAAAAA, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 0xAAAAAAAA, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 0xAAAAAAAA, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 0xAAAAAAAA, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -83,8 +83,8 @@ void f4 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -100,8 +100,8 @@ void f5 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -117,7 +117,7 @@ void f6 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, x, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, x, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, x, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, x, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-98.c b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-98.c
index ed7477b3ca8..328d1d62a70 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-98.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-98.c
@@ -15,8 +15,8 @@ void f0 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, -16, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, -16, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, -16, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, -16, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -32,8 +32,8 @@ void f1 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 15, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 15, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 15, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 15, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -49,8 +49,8 @@ void f2 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 16, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 16, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 16, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 16, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
@@ -66,7 +66,7 @@ void f3 (void * in, void *out, int64_t x, int n)
{
vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
- vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 0xAAAAAAA, 4);
- vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 0xAAAAAAA, 4);
+ vint64m1_t v3 = __riscv_vsadd_vx_i64m1 (v2, 0xAAAAAAA, 0,4);
+ vint64m1_t v4 = __riscv_vsadd_vx_i64m1 (v3, 0xAAAAAAA, 0,4);
__riscv_vse64_v_i64m1 (out + 2, v4, 4);
}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/merge_constraint-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/merge_constraint-1.c
index 380d67fab53..1b418b82f36 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/merge_constraint-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/merge_constraint-1.c
@@ -165,7 +165,7 @@ void f16 (int8_t* base1,int8_t* base2,int8_t* out,int n)
{
vint8mf4_t v = __riscv_vle8_v_i8mf4 (base1, 32);
for (int i = 0; i < n; i++){
- v = __riscv_vsadd_vv_i8mf4 (v, v, 32);
+ v = __riscv_vsadd_vv_i8mf4 (v, v, 0, 32);
v = __riscv_vle8_v_i8mf4_tu (v, base2, 32);
}
__riscv_vse8_v_i8mf4 (out, v, 32);
@@ -175,7 +175,7 @@ void f17 (int8_t* base1,int8_t* base2,int8_t* out,int n)
{
vint8mf4_t v = __riscv_vle8_v_i8mf4 (base1, 32);
for (int i = 0; i < n; i++){
- v = __riscv_vsadd_vx_i8mf4 (v, 100, 32);
+ v = __riscv_vsadd_vx_i8mf4 (v, 100, 0, 32);
v = __riscv_vle8_v_i8mf4_tu (v, base2, 32);
}
__riscv_vse8_v_i8mf4 (out, v, 32);
@@ -185,7 +185,7 @@ void f18 (int8_t* base1,int8_t* base2,int8_t* out,int n)
{
vint8mf4_t v = __riscv_vle8_v_i8mf4 (base1, 32);
for (int i = 0; i < n; i++){
- v = __riscv_vaadd_vv_i8mf4 (v, v, 32);
+ v = __riscv_vaadd_vv_i8mf4 (v, v, 0, 32);
v = __riscv_vle8_v_i8mf4_tu (v, base2, 32);
}
__riscv_vse8_v_i8mf4 (out, v, 32);
@@ -195,7 +195,7 @@ void f19 (int8_t* base1,int8_t* base2,int8_t* out,int n)
{
vint8mf4_t v = __riscv_vle8_v_i8mf4 (base1, 32);
for (int i = 0; i < n; i++){
- v = __riscv_vaadd_vx_i8mf4 (v, 100, 32);
+ v = __riscv_vaadd_vx_i8mf4 (v, 100, 0, 32);
v = __riscv_vle8_v_i8mf4_tu (v, base2, 32);
}
__riscv_vse8_v_i8mf4 (out, v, 32);
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-6.c b/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-6.c
index fd7ffd3c97b..9659b25a6c3 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-6.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-6.c
@@ -6,24 +6,24 @@
void f0 (uint16_t *base,uint8_t *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v = __riscv_vnclipu_wv_u8mf8(src,v,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
}
void f1 (uint16_t *base,uint8_t *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v = __riscv_vnclipu_wv_u8mf8(src,v,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
}
void f2 (void *base,void *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
__riscv_vse16_v_u16mf4 (out+100,src,vl);
}
@@ -31,8 +31,8 @@ void f2 (void *base,void *out,size_t vl, size_t shift)
void f3 (void *base,void *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v = __riscv_vnclipu_wv_u8mf8(src,v,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
__riscv_vse16_v_u16mf4 (out+100,src,vl);
}
@@ -41,8 +41,8 @@ void f4 (void *base,void *out,size_t vl, size_t shift)
{
vbool64_t m = __riscv_vlm_v_b64 (base + 500, vl);
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v = __riscv_vnclipu_wv_u8mf8_tumu(m,v,src,v,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tumu(m,v,src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
__riscv_vse16_v_u16mf4 (out+100,src,vl);
}
@@ -51,8 +51,8 @@ void f5 (void *base,void *out,size_t vl, size_t shift)
{
vbool64_t m = __riscv_vlm_v_b64 (base + 500, vl);
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v = __riscv_vnclipu_wv_u8mf8_m(m,src,v,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_m(m,src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
__riscv_vse16_v_u16mf4 (out+100,src,vl);
}
@@ -62,7 +62,7 @@ void f6 (void *base,void *out,size_t vl, size_t shift)
vbool64_t m = __riscv_vlm_v_b64 (base + 500, vl);
vuint8mf8_t v = __riscv_vle8_v_u8mf8 (base + 600, vl);
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8_m(m,src,v,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8_m(m,src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v2,vl);
__riscv_vse8_v_u8mf8 (out+100,v,vl);
}
@@ -71,8 +71,8 @@ void f7 (void *base,void *out,size_t vl, size_t shift)
{
vuint8mf8_t v = __riscv_vle8_v_u8mf8 (base + 600, vl);
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v2 = __riscv_vnclipu_wv_u8mf8 (src,v,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8 (src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v2,vl);
__riscv_vse8_v_u8mf8 (out+100,v,vl);
}
@@ -81,8 +81,8 @@ void f8 (void *base,void *out,size_t vl, size_t shift)
{
vuint8mf8_t v = __riscv_vle8_v_u8mf8 (base + 600, vl);
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v2 = __riscv_vnclipu_wv_u8mf8 (src,v,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8 (src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v2,vl);
__riscv_vse8_v_u8mf8 (out+100,v,vl);
__riscv_vse16_v_u16mf4 (out+200,src,vl);
@@ -92,8 +92,8 @@ void f9 (void *base,void *out,size_t vl, size_t shift)
{
vuint8mf8_t v = __riscv_vle8_v_u8mf8 (base + 600, vl);
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v2,vl);
__riscv_vse8_v_u8mf8 (out+100,v,vl);
__riscv_vse16_v_u16mf4 (out+200,src,vl);
@@ -102,11 +102,11 @@ void f9 (void *base,void *out,size_t vl, size_t shift)
void f10 (void *base,void *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
__riscv_vse16_v_u16mf4 (out+100,src,vl);
}
@@ -115,12 +115,12 @@ void f11 (void *base,void *out,size_t vl, size_t shift)
{
vuint8mf8_t v = __riscv_vle8_v_u8mf8 (base + 600, vl);
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,vl);
- v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,vl);
- v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,vl);
- v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,vl);
- v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8_tu (v2,src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v2,vl);
__riscv_vse8_v_u8mf8 (out+100,v,vl);
__riscv_vse16_v_u16mf4 (out+200,src,vl);
@@ -130,11 +130,11 @@ void f12 (void *base,void *out,size_t vl, size_t shift)
{
vuint8mf8_t v = __riscv_vle8_v_u8mf8 (base + 600, vl);
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v2 = __riscv_vnclipu_wv_u8mf8(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8mf8(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8mf8(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8mf8 (src,v2,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8 (src,v2,0,vl);
__riscv_vse8_v_u8mf8 (out,v2,vl);
__riscv_vse8_v_u8mf8 (out+100,v,vl);
}
@@ -144,8 +144,8 @@ void f13 (void *base,void *base2,void *out,size_t vl, int n)
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100, vl);
for (int i = 0; i < n; i++){
vbool64_t m = __riscv_vlm_v_b64 (base + i, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8_m(m,src,vl,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8_m(m,src,vl,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
v = __riscv_vle8_v_u8mf8_tu (v, base2, vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
@@ -157,7 +157,7 @@ void f14 (void *base,void *base2,void *out,size_t vl, int n)
for (int i = 0; i < n; i++){
vbool64_t m = __riscv_vlm_v_b64 (base + i, vl);
vuint8mf8_t v = __riscv_vle8_v_u8mf8 (base + 600, vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
v = __riscv_vle8_v_u8mf8_tu (v, base2, vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v2,vl);
@@ -170,11 +170,11 @@ void f15 (void *base,void *base2,void *out,size_t vl, int n)
for (int i = 0; i < n; i++){
vbool64_t m = __riscv_vlm_v_b64 (base + i, vl);
vuint8mf8_t v = __riscv_vle8_v_u8mf8 (base + 600, vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8(src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8(src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8(src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8(src,v,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
v = __riscv_vle8_v_u8mf8_tu (v, base2, vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v2,vl);
@@ -185,7 +185,7 @@ void f16 (uint16_t *base,uint8_t *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
vuint8mf8_t v = __riscv_vncvt_x_x_w_u8mf8(src,vl);
- vuint8mf8_t v3 = __riscv_vnclipu_wv_u8mf8(src,v,vl);
+ vuint8mf8_t v3 = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
__riscv_vse8_v_u8mf8 (out + 100,v3,vl);
}
@@ -195,7 +195,7 @@ void f17 (void *base,void *out,size_t vl, int n)
for (int i = 0; i < n; i++){
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100*i, vl);
vuint8mf8_t src2 = __riscv_vle8_v_u8mf8 (base + 200*i, vl);
- vuint8mf8_t v = __riscv_vnclipu_wv_u8mf8(src,src2,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wv_u8mf8(src,src2,0,vl);
vuint16mf4_t v2 = __riscv_vadd_vv_u16mf4 (src, src,vl);
asm volatile ("":::"memory");
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
@@ -209,12 +209,12 @@ void f18 (void *base,void *out,size_t vl, int n)
vuint8mf8_t v = __riscv_vle8_v_u8mf8 ((base + 1000), vl);
for (int i = 0; i < n; i++){
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100*i, vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
}
@@ -224,12 +224,12 @@ void f19 (void *base,void *out,size_t vl, int n)
vuint8mf8_t v = __riscv_vle8_v_u8mf8 ((base + 1000), vl);
for (int i = 0; i < n; i++){
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100*i, vl);
- v = __riscv_vnclipu_wv_u8mf8(src,v,vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,vl);
- v2 = __riscv_vnclipu_wv_u8mf8(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8mf8(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8mf8(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8mf8(src,v2,vl);
+ v = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8(src,v2,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
__riscv_vse8_v_u8mf8 (out + 200*i,v2,vl);
}
@@ -240,9 +240,9 @@ void f20 (void *base,void *out,size_t vl, int n)
vuint8mf8_t v = __riscv_vle8_v_u8mf8 ((base + 1000), vl);
for (int i = 0; i < n; i++){
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100*i, vl);
- v = __riscv_vnclipu_wv_u8mf8(src,v,vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,vl);
- v2 = __riscv_vnclipu_wv_u8mf8(src,v2,vl);
+ v = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8mf8(src,v2,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
__riscv_vse8_v_u8mf8 (out + 200*i,v2,vl);
}
@@ -253,12 +253,12 @@ void f21 (void *base,void *out,size_t vl, int n)
for (int i = 0; i < n; i++){
vuint8mf8_t v = __riscv_vle8_v_u8mf8 ((base + 1000 * i), vl);
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100*i, vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src,v,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
__riscv_vse16_v_u16mf4 (out + 200*i,src,vl);
}
@@ -274,12 +274,12 @@ void f22 (uint16_t *base,uint8_t *out,size_t vl, int n)
vuint16mf4_t src4 = __riscv_vle16_v_u16mf4 (base + 400*i, vl);
vuint16mf4_t src5 = __riscv_vle16_v_u16mf4 (base + 500*i, vl);
vuint16mf4_t src6 = __riscv_vle16_v_u16mf4 (base + 600*i, vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src1,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src2,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src3,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src4,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src5,v,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src6,v,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src1,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src2,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src3,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src4,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src5,v,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src6,v,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
}
@@ -295,12 +295,12 @@ void f23 (uint16_t *base,uint8_t *out,size_t vl, int n)
vuint16mf4_t src4 = __riscv_vle16_v_u16mf4 (base + 400*i, vl);
vuint16mf4_t src5 = __riscv_vle16_v_u16mf4 (base + 500*i, vl);
vuint16mf4_t src6 = __riscv_vle16_v_u16mf4 (base + 600*i, vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src1,v2,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src2,v2,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src3,v2,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src4,v2,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src5,v2,vl);
- v = __riscv_vnclipu_wv_u8mf8_tu(v,src6,v2,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src1,v2,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src2,v2,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src3,v2,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src4,v2,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src5,v2,0,vl);
+ v = __riscv_vnclipu_wv_u8mf8_tu(v,src6,v2,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
}
@@ -312,9 +312,9 @@ void f24 (void *base,void *base2,void *out,size_t vl, int n)
vuint8mf8_t src3 = __riscv_vle8_v_u8mf8 (base + 300, vl);
for (int i = 0; i < n; i++){
vbool64_t m = __riscv_vlm_v_b64 (base + i, vl);
- vuint16mf4_t v = __riscv_vnclipu_wv_u16mf4_m(m,src,src2,vl);
+ vuint16mf4_t v = __riscv_vnclipu_wv_u16mf4_m(m,src,src2,0,vl);
vuint16mf4_t v2 = __riscv_vle16_v_u16mf4_tu (v, base2 + i, vl);
- vuint8mf8_t v3 = __riscv_vnclipu_wv_u8mf8_m(m,v2,src3,vl);
+ vuint8mf8_t v3 = __riscv_vnclipu_wv_u8mf8_m(m,v2,src3,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v3,vl);
}
}
@@ -328,7 +328,7 @@ void f25 (void *base,void *out,size_t vl, size_t shift)
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
"v26", "v27", "v28", "v29", "v30");
vuint8mf8_t v = __riscv_vle8_v_u8mf8 (base + 100, vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
asm volatile("#" ::
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
@@ -351,7 +351,7 @@ void f26 (void *base,void *out,size_t vl, size_t shift)
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
"v26", "v27", "v28", "v29", "v30");
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,vl);
+ vuint8mf8_t v2 = __riscv_vnclipu_wv_u8mf8(src,v,0,vl);
asm volatile("#" ::
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-7.c
index 70ba7d7459e..a97a87e7925 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-7.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-7.c
@@ -6,24 +6,24 @@
void f0 (uint16_t *base,uint8_t *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v = __riscv_vnclipu_wv_u8m1(src,v,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
}
void f1 (uint16_t *base,uint8_t *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v = __riscv_vnclipu_wv_u8m1(src,v,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
}
void f2 (void *base,void *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
__riscv_vse16_v_u16m2 (out+100,src,vl);
}
@@ -31,8 +31,8 @@ void f2 (void *base,void *out,size_t vl, size_t shift)
void f3 (void *base,void *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v = __riscv_vnclipu_wv_u8m1(src,v,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
__riscv_vse16_v_u16m2 (out+100,src,vl);
}
@@ -41,8 +41,8 @@ void f4 (void *base,void *out,size_t vl, size_t shift)
{
vbool8_t m = __riscv_vlm_v_b8 (base + 500, vl);
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v = __riscv_vnclipu_wv_u8m1_tumu(m,v,src,v,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tumu(m,v,src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
__riscv_vse16_v_u16m2 (out+100,src,vl);
}
@@ -51,8 +51,8 @@ void f5 (void *base,void *out,size_t vl, size_t shift)
{
vbool8_t m = __riscv_vlm_v_b8 (base + 500, vl);
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v = __riscv_vnclipu_wv_u8m1_m(m,src,v,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_m(m,src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
__riscv_vse16_v_u16m2 (out+100,src,vl);
}
@@ -62,7 +62,7 @@ void f6 (void *base,void *out,size_t vl, size_t shift)
vbool8_t m = __riscv_vlm_v_b8 (base + 500, vl);
vuint8m1_t v = __riscv_vle8_v_u8m1 (base + 600, vl);
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1_m(m,src,v,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1_m(m,src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v2,vl);
__riscv_vse8_v_u8m1 (out+100,v,vl);
}
@@ -71,8 +71,8 @@ void f7 (void *base,void *out,size_t vl, size_t shift)
{
vuint8m1_t v = __riscv_vle8_v_u8m1 (base + 600, vl);
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v2 = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v2 = __riscv_vnclipu_wv_u8m1 (src,v,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1 (src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v2,vl);
__riscv_vse8_v_u8m1 (out+100,v,vl);
}
@@ -81,8 +81,8 @@ void f8 (void *base,void *out,size_t vl, size_t shift)
{
vuint8m1_t v = __riscv_vle8_v_u8m1 (base + 600, vl);
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v2 = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v2 = __riscv_vnclipu_wv_u8m1 (src,v,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1 (src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v2,vl);
__riscv_vse8_v_u8m1 (out+100,v,vl);
__riscv_vse16_v_u16m2 (out+200,src,vl);
@@ -92,8 +92,8 @@ void f9 (void *base,void *out,size_t vl, size_t shift)
{
vuint8m1_t v = __riscv_vle8_v_u8m1 (base + 600, vl);
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v2 = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v2,vl);
__riscv_vse8_v_u8m1 (out+100,v,vl);
__riscv_vse16_v_u16m2 (out+200,src,vl);
@@ -102,11 +102,11 @@ void f9 (void *base,void *out,size_t vl, size_t shift)
void f10 (void *base,void *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
__riscv_vse16_v_u16m2 (out+100,src,vl);
}
@@ -115,12 +115,12 @@ void f11 (void *base,void *out,size_t vl, size_t shift)
{
vuint8m1_t v = __riscv_vle8_v_u8m1 (base + 600, vl);
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v2 = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,vl);
- v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,vl);
- v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,vl);
- v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,vl);
- v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1_tu (v2,src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v2,vl);
__riscv_vse8_v_u8m1 (out+100,v,vl);
__riscv_vse16_v_u16m2 (out+200,src,vl);
@@ -130,11 +130,11 @@ void f12 (void *base,void *out,size_t vl, size_t shift)
{
vuint8m1_t v = __riscv_vle8_v_u8m1 (base + 600, vl);
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v2 = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v2 = __riscv_vnclipu_wv_u8m1(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8m1(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8m1(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8m1 (src,v2,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1 (src,v2,0,vl);
__riscv_vse8_v_u8m1 (out,v2,vl);
__riscv_vse8_v_u8m1 (out+100,v,vl);
}
@@ -144,8 +144,8 @@ void f13 (void *base,void *base2,void *out,size_t vl, int n)
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100, vl);
for (int i = 0; i < n; i++){
vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1_m(m,src,vl,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1_m(m,src,vl,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
v = __riscv_vle8_v_u8m1_tu (v, base2, vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
@@ -157,7 +157,7 @@ void f14 (void *base,void *base2,void *out,size_t vl, int n)
for (int i = 0; i < n; i++){
vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
vuint8m1_t v = __riscv_vle8_v_u8m1 (base + 600, vl);
- vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
v = __riscv_vle8_v_u8m1_tu (v, base2, vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v2,vl);
@@ -170,11 +170,11 @@ void f15 (void *base,void *base2,void *out,size_t vl, int n)
for (int i = 0; i < n; i++){
vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
vuint8m1_t v = __riscv_vle8_v_u8m1 (base + 600, vl);
- vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,vl);
- v = __riscv_vnclipu_wv_u8m1(src,v,vl);
- v = __riscv_vnclipu_wv_u8m1(src,v,vl);
- v = __riscv_vnclipu_wv_u8m1(src,v,vl);
- v = __riscv_vnclipu_wv_u8m1(src,v,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
v = __riscv_vle8_v_u8m1_tu (v, base2, vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v2,vl);
@@ -185,7 +185,7 @@ void f16 (uint16_t *base,uint8_t *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
vuint8m1_t v = __riscv_vncvt_x_x_w_u8m1(src,vl);
- vuint8m1_t v3 = __riscv_vnclipu_wv_u8m1(src,v,vl);
+ vuint8m1_t v3 = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
__riscv_vse8_v_u8m1 (out + 100,v3,vl);
}
@@ -195,7 +195,7 @@ void f17 (void *base,void *out,size_t vl, int n)
for (int i = 0; i < n; i++){
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100*i, vl);
vuint8m1_t src2 = __riscv_vle8_v_u8m1 (base + 200*i, vl);
- vuint8m1_t v = __riscv_vnclipu_wv_u8m1(src,src2,vl);
+ vuint8m1_t v = __riscv_vnclipu_wv_u8m1(src,src2,0,vl);
vuint16m2_t v2 = __riscv_vadd_vv_u16m2 (src, src,vl);
asm volatile ("":::"memory");
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
@@ -209,12 +209,12 @@ void f18 (void *base,void *out,size_t vl, int n)
vuint8m1_t v = __riscv_vle8_v_u8m1 ((base + 1000), vl);
for (int i = 0; i < n; i++){
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100*i, vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
}
@@ -224,12 +224,12 @@ void f19 (void *base,void *out,size_t vl, int n)
vuint8m1_t v = __riscv_vle8_v_u8m1 ((base + 1000), vl);
for (int i = 0; i < n; i++){
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100*i, vl);
- v = __riscv_vnclipu_wv_u8m1(src,v,vl);
- vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,vl);
- v2 = __riscv_vnclipu_wv_u8m1(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8m1(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8m1(src,v2,vl);
- v2 = __riscv_vnclipu_wv_u8m1(src,v2,vl);
+ v = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1(src,v2,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1(src,v2,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
__riscv_vse8_v_u8m1 (out + 200*i,v2,vl);
}
@@ -240,9 +240,9 @@ void f20 (void *base,void *out,size_t vl, int n)
vuint8m1_t v = __riscv_vle8_v_u8m1 ((base + 1000), vl);
for (int i = 0; i < n; i++){
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100*i, vl);
- v = __riscv_vnclipu_wv_u8m1(src,v,vl);
- vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,vl);
- v2 = __riscv_vnclipu_wv_u8m1(src,v2,vl);
+ v = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
+ v2 = __riscv_vnclipu_wv_u8m1(src,v2,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
__riscv_vse8_v_u8m1 (out + 200*i,v2,vl);
}
@@ -253,12 +253,12 @@ void f21 (void *base,void *out,size_t vl, int n)
for (int i = 0; i < n; i++){
vuint8m1_t v = __riscv_vle8_v_u8m1 ((base + 1000 * i), vl);
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100*i, vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src,v,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
__riscv_vse16_v_u16m2 (out + 200*i,src,vl);
}
@@ -274,12 +274,12 @@ void f22 (uint16_t *base,uint8_t *out,size_t vl, int n)
vuint16m2_t src4 = __riscv_vle16_v_u16m2 (base + 400*i, vl);
vuint16m2_t src5 = __riscv_vle16_v_u16m2 (base + 500*i, vl);
vuint16m2_t src6 = __riscv_vle16_v_u16m2 (base + 600*i, vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src1,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src2,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src3,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src4,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src5,v,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src6,v,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src1,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src2,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src3,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src4,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src5,v,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src6,v,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
}
@@ -295,12 +295,12 @@ void f23 (uint16_t *base,uint8_t *out,size_t vl, int n)
vuint16m2_t src4 = __riscv_vle16_v_u16m2 (base + 400*i, vl);
vuint16m2_t src5 = __riscv_vle16_v_u16m2 (base + 500*i, vl);
vuint16m2_t src6 = __riscv_vle16_v_u16m2 (base + 600*i, vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src1,v2,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src2,v2,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src3,v2,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src4,v2,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src5,v2,vl);
- v = __riscv_vnclipu_wv_u8m1_tu(v,src6,v2,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src1,v2,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src2,v2,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src3,v2,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src4,v2,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src5,v2,0,vl);
+ v = __riscv_vnclipu_wv_u8m1_tu(v,src6,v2,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
}
@@ -312,9 +312,9 @@ void f24 (void *base,void *base2,void *out,size_t vl, int n)
vuint8m1_t src3 = __riscv_vle8_v_u8m1 (base + 300, vl);
for (int i = 0; i < n; i++){
vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
- vuint16m2_t v = __riscv_vnclipu_wv_u16m2_m(m,src,src2,vl);
+ vuint16m2_t v = __riscv_vnclipu_wv_u16m2_m(m,src,src2,0,vl);
vuint16m2_t v2 = __riscv_vle16_v_u16m2_tu (v, base2 + i, vl);
- vuint8m1_t v3 = __riscv_vnclipu_wv_u8m1_m(m,v2,src3,vl);
+ vuint8m1_t v3 = __riscv_vnclipu_wv_u8m1_m(m,v2,src3,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v3,vl);
}
}
@@ -328,7 +328,7 @@ void f25 (void *base,void *out,size_t vl, size_t shift)
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
"v26", "v27", "v28", "v29");
vuint8m1_t v = __riscv_vle8_v_u8m1 (base + 100, vl);
- vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
asm volatile("#" ::
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
@@ -351,7 +351,7 @@ void f26 (void *base,void *out,size_t vl, size_t shift)
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
"v26", "v27", "v28");
- vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
asm volatile("#" ::
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
@@ -374,7 +374,7 @@ void f27 (void *base,void *out,size_t vl, size_t shift)
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
"v26", "v27", "v28");
- vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,vl);
+ vuint8m1_t v2 = __riscv_vnclipu_wv_u8m1(src,v,0,vl);
asm volatile("#" ::
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17",
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-8.c b/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-8.c
index ec8a5565bd9..0dac85a3203 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-8.c
@@ -6,7 +6,7 @@
void f0 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
}
@@ -14,14 +14,14 @@ void f1 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
vuint8mf8_t src2 = __riscv_vle8_v_u8mf8 ((int8_t *)(base + 100), vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8_tu(src2,src,shift,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8_tu(src2,src,shift,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
}
void f2 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
vuint16mf4_t v2 = __riscv_vadd_vv_u16mf4 (src, src,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
__riscv_vse16_v_u16mf4 ((int16_t *)out,v2,vl);
@@ -31,7 +31,7 @@ void f3 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
{
for (int i = 0; i < n; i++){
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100*i, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
vuint16mf4_t v2 = __riscv_vadd_vv_u16mf4 (src, src,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
__riscv_vse16_v_u16mf4 ((int16_t *)(out + 200*i),v2,vl);
@@ -41,9 +41,9 @@ void f3 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
void f4 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
vuint16mf4_t v2 = __riscv_vadd_vv_u16mf4 (src, src,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
__riscv_vse16_v_u16mf4 ((int16_t *)out,v2,vl);
@@ -54,8 +54,8 @@ void f5 (void *base,void *base2,void *out,size_t vl, int n, size_t shift)
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100, vl);
for (int i = 0; i < n; i++){
vbool64_t m = __riscv_vlm_v_b64 (base + i, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8_m(m,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8_m(m,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
v = __riscv_vle8_v_u8mf8_tu (v, base2, vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
@@ -64,7 +64,7 @@ void f5 (void *base,void *base2,void *out,size_t vl, int n, size_t shift)
void f6 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
}
@@ -72,14 +72,14 @@ void f7 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
vuint8m1_t src2 = __riscv_vle8_v_u8m1 ((int8_t *)(base + 100), vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1_tu(src2,src,shift,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1_tu(src2,src,shift,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
}
void f8 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
vuint16m2_t v2 = __riscv_vadd_vv_u16m2 (src, src,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
__riscv_vse16_v_u16m2 ((int16_t *)out,v2,vl);
@@ -89,7 +89,7 @@ void f9 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
{
for (int i = 0; i < n; i++){
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100*i, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
vuint16m2_t v2 = __riscv_vadd_vv_u16m2 (src, src,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
__riscv_vse16_v_u16m2 ((int16_t *)(out + 200*i),v2,vl);
@@ -99,9 +99,9 @@ void f9 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
void f10 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
vuint16m2_t v2 = __riscv_vadd_vv_u16m2 (src, src,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
__riscv_vse16_v_u16m2 ((int16_t *)out,v2,vl);
@@ -112,8 +112,8 @@ void f11 (void *base,void *base2,void *out,size_t vl, int n, size_t shift)
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100, vl);
for (int i = 0; i < n; i++){
vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1_m(m,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1_m(m,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
v = __riscv_vle8_v_u8m1_tu (v, base2, vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
@@ -124,12 +124,12 @@ void f12 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
vuint8mf8_t v = __riscv_vle8_v_u8mf8 ((int8_t *)(base + 1000), vl);
for (int i = 0; i < n; i++){
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100*i, vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
}
@@ -139,12 +139,12 @@ void f13 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
vuint8m1_t v = __riscv_vle8_v_u8m1 ((int8_t *)(base + 1000), vl);
for (int i = 0; i < n; i++){
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100*i, vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
}
@@ -154,12 +154,12 @@ void f14 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
for (int i = 0; i < n; i++){
vuint8mf8_t v = __riscv_vle8_v_u8mf8 ((int8_t *)(base + 1000 * i), vl);
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100*i, vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,shift,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
}
@@ -169,12 +169,12 @@ void f15 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
for (int i = 0; i < n; i++){
vuint8m1_t v = __riscv_vle8_v_u8m1 ((int8_t *)(base + 1000 * i), vl);
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100*i, vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,shift,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
}
@@ -189,12 +189,12 @@ void f16 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
vuint16mf4_t src4 = __riscv_vle16_v_u16mf4 (base + 400*i, vl);
vuint16mf4_t src5 = __riscv_vle16_v_u16mf4 (base + 500*i, vl);
vuint16mf4_t src6 = __riscv_vle16_v_u16mf4 (base + 600*i, vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src1,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src2,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src3,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src4,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src5,shift,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src6,shift,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src1,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src2,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src3,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src4,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src5,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src6,shift,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
}
@@ -209,12 +209,12 @@ void f17 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
vuint16m2_t src4 = __riscv_vle16_v_u16m2 (base + 400*i, vl);
vuint16m2_t src5 = __riscv_vle16_v_u16m2 (base + 500*i, vl);
vuint16m2_t src6 = __riscv_vle16_v_u16m2 (base + 600*i, vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src1,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src2,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src3,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src4,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src5,shift,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src6,shift,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src1,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src2,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src3,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src4,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src5,shift,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src6,shift,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
}
@@ -224,9 +224,9 @@ void f18 (void *base,void *base2,void *out,size_t vl, int n, size_t shift)
vuint32mf2_t src = __riscv_vle32_v_u32mf2 (base + 100, vl);
for (int i = 0; i < n; i++){
vbool64_t m = __riscv_vlm_v_b64 (base + i, vl);
- vuint16mf4_t v = __riscv_vnclipu_wx_u16mf4_m(m,src,shift,vl);
+ vuint16mf4_t v = __riscv_vnclipu_wx_u16mf4_m(m,src,shift,0,vl);
vuint16mf4_t v2 = __riscv_vle16_v_u16mf4_tu (v, base2 + i, vl);
- vuint8mf8_t v3 = __riscv_vnclipu_wx_u8mf8_m(m,v2,shift,vl);
+ vuint8mf8_t v3 = __riscv_vnclipu_wx_u8mf8_m(m,v2,shift,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v3,vl);
}
}
@@ -236,10 +236,10 @@ void f19 (void *base,void *base2,void *out,size_t vl, int n, size_t shift)
vuint32m4_t src = __riscv_vle32_v_u32m4 (base + 100, vl);
for (int i = 0; i < n; i++){
vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
- vuint16m2_t v = __riscv_vnclipu_wx_u16m2_m(m,src,shift,vl);
+ vuint16m2_t v = __riscv_vnclipu_wx_u16m2_m(m,src,shift,0,vl);
vuint16m2_t v2 = __riscv_vle16_v_u16m2_tu (v, base2 + i, vl);
- vuint8m1_t v3 = __riscv_vnclipu_wx_u8m1_m(m,v2,shift,vl);
- vuint8m1_t v4 = __riscv_vnclipu_wx_u8m1_tumu(m,v3,v2,shift,vl);
+ vuint8m1_t v3 = __riscv_vnclipu_wx_u8m1_m(m,v2,shift,0,vl);
+ vuint8m1_t v4 = __riscv_vnclipu_wx_u8m1_tumu(m,v3,v2,shift,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v3,vl);
__riscv_vse8_v_u8m1 (out + 222*i,v4,vl);
}
@@ -255,7 +255,7 @@ void f20 (int16_t *base,int8_t *out,size_t vl, size_t shift)
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
"v26", "v27", "v28", "v29");
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
/* Only allow vncvt SRC == DEST v30. */
asm volatile("#" ::
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
@@ -276,7 +276,7 @@ void f21 (int16_t *base,int8_t *out,size_t vl, size_t shift)
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
"v26", "v27", "v28", "v29", "v30");
- vuint8mf2_t v = __riscv_vnclipu_wx_u8mf2(src,shift,vl);
+ vuint8mf2_t v = __riscv_vnclipu_wx_u8mf2(src,shift,0,vl);
/* Only allow vncvt SRC == DEST v31. */
asm volatile("#" ::
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
@@ -297,7 +297,7 @@ void f22 (int16_t *base,int8_t *out,size_t vl, size_t shift)
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
"v26", "v27", "v28", "v29");
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,0,vl);
/* Only allow v29. */
asm volatile("#" ::
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-9.c b/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-9.c
index ff34749bdb9..448ca859dbb 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-9.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/narrow_constraint-9.c
@@ -6,7 +6,7 @@
void f0 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,31,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,31,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
}
@@ -14,14 +14,14 @@ void f1 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
vuint8mf8_t src2 = __riscv_vle8_v_u8mf8 ((int8_t *)(base + 100), vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8_tu(src2,src,31,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8_tu(src2,src,31,0,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
}
void f2 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,31,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,31,0,vl);
vuint16mf4_t v2 = __riscv_vadd_vv_u16mf4 (src, src,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
__riscv_vse16_v_u16mf4 ((int16_t *)out,v2,vl);
@@ -31,7 +31,7 @@ void f3 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
{
for (int i = 0; i < n; i++){
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100*i, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,31,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,31,0,vl);
vuint16mf4_t v2 = __riscv_vadd_vv_u16mf4 (src, src,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
__riscv_vse16_v_u16mf4 ((int16_t *)(out + 200*i),v2,vl);
@@ -41,9 +41,9 @@ void f3 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
void f4 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8(src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
vuint16mf4_t v2 = __riscv_vadd_vv_u16mf4 (src, src,vl);
__riscv_vse8_v_u8mf8 (out,v,vl);
__riscv_vse16_v_u16mf4 ((int16_t *)out,v2,vl);
@@ -54,8 +54,8 @@ void f5 (void *base,void *base2,void *out,size_t vl, int n, size_t shift)
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100, vl);
for (int i = 0; i < n; i++){
vbool64_t m = __riscv_vlm_v_b64 (base + i, vl);
- vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8_m(m,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
+ vuint8mf8_t v = __riscv_vnclipu_wx_u8mf8_m(m,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
v = __riscv_vle8_v_u8mf8_tu (v, base2, vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
@@ -64,7 +64,7 @@ void f5 (void *base,void *base2,void *out,size_t vl, int n, size_t shift)
void f6 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
}
@@ -72,14 +72,14 @@ void f7 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
vuint8m1_t src2 = __riscv_vle8_v_u8m1 ((int8_t *)(base + 100), vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1_tu(src2,src,31,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1_tu(src2,src,31,0,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
}
void f8 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,0,vl);
vuint16m2_t v2 = __riscv_vadd_vv_u16m2 (src, src,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
__riscv_vse16_v_u16m2 ((int16_t *)out,v2,vl);
@@ -89,7 +89,7 @@ void f9 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
{
for (int i = 0; i < n; i++){
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100*i, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,0,vl);
vuint16m2_t v2 = __riscv_vadd_vv_u16m2 (src, src,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
__riscv_vse16_v_u16m2 ((int16_t *)(out + 200*i),v2,vl);
@@ -99,9 +99,9 @@ void f9 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
void f10 (int16_t *base,int8_t *out,size_t vl, size_t shift)
{
vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
vuint16m2_t v2 = __riscv_vadd_vv_u16m2 (src, src,vl);
__riscv_vse8_v_u8m1 (out,v,vl);
__riscv_vse16_v_u16m2 ((int16_t *)out,v2,vl);
@@ -112,8 +112,8 @@ void f11 (void *base,void *base2,void *out,size_t vl, int n, size_t shift)
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100, vl);
for (int i = 0; i < n; i++){
vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1_m(m,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1_m(m,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
v = __riscv_vle8_v_u8m1_tu (v, base2, vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
@@ -124,12 +124,12 @@ void f12 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
vuint8mf8_t v = __riscv_vle8_v_u8mf8 ((int8_t *)(base + 1000), vl);
for (int i = 0; i < n; i++){
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100*i, vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
}
@@ -139,12 +139,12 @@ void f13 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
vuint8m1_t v = __riscv_vle8_v_u8m1 ((int8_t *)(base + 1000), vl);
for (int i = 0; i < n; i++){
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100*i, vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
}
@@ -154,12 +154,12 @@ void f14 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
for (int i = 0; i < n; i++){
vuint8mf8_t v = __riscv_vle8_v_u8mf8 ((int8_t *)(base + 1000 * i), vl);
vuint16mf4_t src = __riscv_vle16_v_u16mf4 (base + 100*i, vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src,31,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
}
@@ -169,12 +169,12 @@ void f15 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
for (int i = 0; i < n; i++){
vuint8m1_t v = __riscv_vle8_v_u8m1 ((int8_t *)(base + 1000 * i), vl);
vuint16m2_t src = __riscv_vle16_v_u16m2 (base + 100*i, vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src,31,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
}
@@ -189,12 +189,12 @@ void f16 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
vuint16mf4_t src4 = __riscv_vle16_v_u16mf4 (base + 400*i, vl);
vuint16mf4_t src5 = __riscv_vle16_v_u16mf4 (base + 500*i, vl);
vuint16mf4_t src6 = __riscv_vle16_v_u16mf4 (base + 600*i, vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src1,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src2,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src3,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src4,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src5,31,vl);
- v = __riscv_vnclipu_wx_u8mf8_tu(v,src6,31,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src1,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src2,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src3,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src4,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src5,31,0,vl);
+ v = __riscv_vnclipu_wx_u8mf8_tu(v,src6,31,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v,vl);
}
}
@@ -209,12 +209,12 @@ void f17 (int16_t *base,int8_t *out,size_t vl, int n, size_t shift)
vuint16m2_t src4 = __riscv_vle16_v_u16m2 (base + 400*i, vl);
vuint16m2_t src5 = __riscv_vle16_v_u16m2 (base + 500*i, vl);
vuint16m2_t src6 = __riscv_vle16_v_u16m2 (base + 600*i, vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src1,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src2,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src3,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src4,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src5,31,vl);
- v = __riscv_vnclipu_wx_u8m1_tu(v,src6,31,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src1,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src2,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src3,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src4,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src5,31,0,vl);
+ v = __riscv_vnclipu_wx_u8m1_tu(v,src6,31,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v,vl);
}
}
@@ -224,9 +224,9 @@ void f18 (void *base,void *base2,void *out,size_t vl, int n, size_t shift)
vuint32mf2_t src = __riscv_vle32_v_u32mf2 (base + 100, vl);
for (int i = 0; i < n; i++){
vbool64_t m = __riscv_vlm_v_b64 (base + i, vl);
- vuint16mf4_t v = __riscv_vnclipu_wx_u16mf4_m(m,src,31,vl);
+ vuint16mf4_t v = __riscv_vnclipu_wx_u16mf4_m(m,src,31,0,vl);
vuint16mf4_t v2 = __riscv_vle16_v_u16mf4_tu (v, base2 + i, vl);
- vuint8mf8_t v3 = __riscv_vnclipu_wx_u8mf8_m(m,v2,31,vl);
+ vuint8mf8_t v3 = __riscv_vnclipu_wx_u8mf8_m(m,v2,31,0,vl);
__riscv_vse8_v_u8mf8 (out + 100*i,v3,vl);
}
}
@@ -236,10 +236,10 @@ void f19 (void *base,void *base2,void *out,size_t vl, int n, size_t shift)
vuint32m4_t src = __riscv_vle32_v_u32m4 (base + 100, vl);
for (int i = 0; i < n; i++){
vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
- vuint16m2_t v = __riscv_vnclipu_wx_u16m2_m(m,src,31,vl);
+ vuint16m2_t v = __riscv_vnclipu_wx_u16m2_m(m,src,31,0,vl);
vuint16m2_t v2 = __riscv_vle16_v_u16m2_tu (v, base2 + i, vl);
- vuint8m1_t v3 = __riscv_vnclipu_wx_u8m1_m(m,v2,31,vl);
- vuint8m1_t v4 = __riscv_vnclipu_wx_u8m1_tumu(m,v3,v2,31,vl);
+ vuint8m1_t v3 = __riscv_vnclipu_wx_u8m1_m(m,v2,31,0,vl);
+ vuint8m1_t v4 = __riscv_vnclipu_wx_u8m1_tumu(m,v3,v2,31,0,vl);
__riscv_vse8_v_u8m1 (out + 100*i,v3,vl);
__riscv_vse8_v_u8m1 (out + 222*i,v4,vl);
}
@@ -255,7 +255,7 @@ void f20 (int16_t *base,int8_t *out,size_t vl, size_t shift)
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
"v26", "v27", "v28", "v29");
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,0,vl);
/* Only allow vncvt SRC == DEST v30. */
asm volatile("#" ::
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
@@ -276,7 +276,7 @@ void f21 (int16_t *base,int8_t *out,size_t vl, size_t shift)
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
"v26", "v27", "v28", "v29", "v30");
- vuint8mf2_t v = __riscv_vnclipu_wx_u8mf2(src,31,vl);
+ vuint8mf2_t v = __riscv_vnclipu_wx_u8mf2(src,31,0,vl);
/* Only allow vncvt SRC == DEST v31. */
asm volatile("#" ::
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
@@ -297,7 +297,7 @@ void f22 (int16_t *base,int8_t *out,size_t vl, size_t shift)
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
"v26", "v27", "v28", "v29");
- vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,31,0,vl);
/* Only allow v29. */
asm volatile("#" ::
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-2.c
new file mode 100644
index 00000000000..74b53b89f2b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-2.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int32_t x)
+{
+ vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+ vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+ vint32m1_t v3 = __riscv_vaadd_vx_i32m1 (v2, 0, x, 4); /* { dg-error {argument 3 of '__riscv_vaadd_vx_i32m1' must be an integer constant expression} } */
+ __riscv_vse32_v_i32m1 (out, v3, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-3.c
new file mode 100644
index 00000000000..237b0734dc6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-3.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+void f (void * in, void *out, int32_t x)
+{
+ vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+ vint32m1_t v2 = __riscv_vle32_v_i32m1_tu (v, in, 4);
+ vint32m1_t v3 = __riscv_vaadd_vx_i32m1 (v2, 0, 10, 4); /* { dg-error {passing 10 to argument 3 of '__riscv_vaadd_vx_i32m1', which expects a value in the range \[0, 3\]} } */
+ __riscv_vse32_v_i32m1 (out, v3, 4);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-4.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-4.c
new file mode 100644
index 00000000000..551b601c472
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-4.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+void f (uint16_t *base,uint8_t *out,size_t vl, size_t shift)
+{
+ vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,shift,vl); /* { dg-error {argument 3 of '__riscv_vnclipu_wx_u8m1' must be an integer constant expression} } */
+ __riscv_vse8_v_u8m1 (out,v,vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-5.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-5.c
new file mode 100644
index 00000000000..69801119a72
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vxrm-5.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+void f (uint16_t *base,uint8_t *out,size_t vl, size_t shift)
+{
+ vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
+ vuint8m1_t v = __riscv_vnclipu_wx_u8m1(src,shift,10,vl); /* { dg-error {passing 10 to argument 3 of '__riscv_vnclipu_wx_u8m1', which expects a value in the range \[0, 3\]} } */
+ __riscv_vse8_v_u8m1 (out,v,vl);
+}
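
An illustrative sketch (not part of the patch): after this change, the RVV fixed-point intrinsics such as __riscv_vnclipu_wx_u8m1 and __riscv_vaadd_vx_i32m1 take an explicit vxrm rounding-mode operand before the vl operand, and as the new vxrm-2.c through vxrm-5.c tests check, it must be an integer constant expression in the range [0, 3] (0 = RNU, 1 = RNE, 2 = RDN, 3 = ROD per the vxrm encoding). A minimal well-formed use, mirroring the updated call sites above:

```c
#include "riscv_vector.h"

void example (uint16_t *base, uint8_t *out, size_t vl, size_t shift)
{
  vuint16m2_t src = __riscv_vle16_v_u16m2 (base, vl);
  /* Rounding mode passed as a constant: 0 = round-to-nearest-up (RNU).
     A non-constant or out-of-range value here is rejected, as the
     dg-error directives in vxrm-2.c..vxrm-5.c verify.  */
  vuint8m1_t v = __riscv_vnclipu_wx_u8m1 (src, shift, 0, vl);
  __riscv_vse8_v_u8m1 (out, v, vl);
}
```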