author    Lorry <lorry@roadtrain.codethink.co.uk>  2012-01-09 13:47:42 +0000
committer Lorry <lorry@roadtrain.codethink.co.uk>  2012-01-09 13:47:42 +0000
commit    b4a5df67f1382a33f4535eb1b10600ca52d294d3 (patch)
tree      d4571b191c2cfc0f5045bd27b54f8a48e70787a8 /gcc/testsuite/gcc.dg/vmx
download  gcc-tarball-b4a5df67f1382a33f4535eb1b10600ca52d294d3.tar.gz
Tarball conversion
Diffstat (limited to 'gcc/testsuite/gcc.dg/vmx')
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/1b-01.c | 11
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/1b-02.c | 61
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/1b-03.c | 64
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/1b-04.c | 7
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/1b-05.c | 13
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/1b-06-ansi.c | 24
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/1b-06.c | 19
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/1b-07-ansi.c | 59
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/1b-07.c | 54
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/1c-01.c | 56
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/1c-02.c | 34
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3a-01.c | 16
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3a-01a.c | 17
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3a-01m.c | 17
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3a-03.c | 18
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3a-03m.c | 18
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3a-04.c | 22
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3a-04m.c | 22
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3a-05.c | 26
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3a-06.c | 15
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3a-06m.c | 15
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3a-07.c | 16
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3b-01.c | 18
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3b-02.c | 16
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3b-10.c | 21
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3b-13.c | 15
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3b-14.c | 29
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3b-15.c | 19
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3c-01.c | 86
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3c-01a.c | 1450
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3c-02.c | 17
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3c-03.c | 17
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/3d-01.c | 171
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/4-01.c | 7
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/4-03.c | 7
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/5-01.c | 4
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/5-02.c | 4
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/5-03.c | 28
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/5-04.c | 10
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/5-07t.c | 37
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/5-10.c | 1352
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/5-11.c | 289
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/7-01.c | 36
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/7-01a.c | 36
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/7c-01.c | 11
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/7d-01.c | 19
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/7d-02.c | 21
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/8-01.c | 4
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/8-02.c | 299
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/8-02a.c | 17
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/brode-1.c | 10
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/bug-1.c | 39
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/bug-2.c | 24
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/bug-3.c | 45
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/cw-bug-1.c | 12
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/cw-bug-3.c | 5
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/dct.c | 176
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/debug-1.c | 26
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/debug-2.c | 42
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/debug-3.c | 75
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/debug-4.c | 78
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/dos-bug-1-gdb.c | 7
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/dos-bug-2-gdb.c | 7
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/eg-5.c | 27
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/fft.c | 99
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-1.c | 20
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-2.c | 19
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-3.c | 26
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-4.c | 5
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-5.c | 26
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-6.c | 30
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-7.c | 35
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-8.c | 13
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-9.c | 5
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-b.c | 23
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-c.c | 25
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-d.c | 14
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-e.c | 44
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-f.c | 125
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-g.c | 119
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/gcc-bug-i.c | 42
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/harness.h | 30
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/ira1.c | 10
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/ira2.c | 23
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/ira2a.c | 9
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/ira2b.c | 17
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/ira2c.c | 11
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/mem.c | 9
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/newton-1.c | 67
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/ops-long-1.c | 80
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/ops-long-2.c | 34
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/ops.c | 3831
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/pr27006.c | 22
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/pr27842.c | 26
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/sn7153.c | 62
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/spill.c | 131
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/spill2.c | 155
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/spill3.c | 156
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/t.c | 43
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/varargs-1.c | 99
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/varargs-2.c | 78
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/varargs-3.c | 75
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/varargs-4.c | 291
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/varargs-5.c | 71
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/varargs-6.c | 35
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/varargs-7.c | 83
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/vmx.exp | 57
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/x-01.c | 25
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/x-02.c | 34
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/x-03.c | 124
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/x-04.c | 80
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/x-05.c | 82
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/yousufi-1.c | 15
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/zero-1.c | 13
-rw-r--r--  gcc/testsuite/gcc.dg/vmx/zero.c | 100
115 files changed, 11865 insertions, 0 deletions
diff --git a/gcc/testsuite/gcc.dg/vmx/1b-01.c b/gcc/testsuite/gcc.dg/vmx/1b-01.c
new file mode 100644
index 0000000000..8cdacb640f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/1b-01.c
@@ -0,0 +1,11 @@
+#include "harness.h"
+
+vector unsigned char a;
+
+static void test()
+{
+ check(sizeof(a) == 16, "sizeof(a)");
+ check(((long)&a & 15) == 0, "alignof(a)");
+ check((long)&a != 0, "&a");
+ check(vec_all_eq(a,((vector unsigned char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0})), "value(a)");
+}
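
1b-01.c relies on three properties of AltiVec vector types: every vector type is 16 bytes wide, 16-byte aligned, and zero-initialized in static storage. A minimal standalone sketch of the same checks, assuming a PowerPC compiler invoked with -maltivec and using assert() in place of the harness.h check():

    #include <altivec.h>
    #include <assert.h>
    #include <stdint.h>

    vector unsigned char a;   /* file scope, so zero-initialized */

    int main(void)
    {
      assert(sizeof(a) == 16);                /* all vector types are 128 bits */
      assert(((uintptr_t)&a & 15) == 0);      /* naturally 16-byte aligned */
      assert(vec_all_eq(a, vec_splat_u8(0))); /* static storage starts zeroed */
      return 0;
    }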
diff --git a/gcc/testsuite/gcc.dg/vmx/1b-02.c b/gcc/testsuite/gcc.dg/vmx/1b-02.c
new file mode 100644
index 0000000000..2f9aca5da5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/1b-02.c
@@ -0,0 +1,61 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector unsigned char u8;
+vector signed char s8;
+vector bool char b8;
+vector unsigned short u16;
+vector signed short s16;
+vector bool short b16;
+vector unsigned int u32;
+vector signed int s32;
+vector bool int b32;
+vector float f32;
+vector pixel p16;
+
+vector unsigned char const u8c;
+vector signed char const s8c;
+vector bool char const b8c;
+vector unsigned short const u16c;
+vector signed short const s16c;
+vector bool short const b16c;
+vector unsigned int const u32c;
+vector signed int const s32c;
+vector bool int const b32c;
+vector float const f32c;
+vector pixel const p16c;
+
+vector unsigned char volatile u8v;
+vector signed char volatile s8v;
+vector bool char volatile b8v;
+vector unsigned short volatile u16v;
+vector signed short volatile s16v;
+vector bool short volatile b16v;
+vector unsigned int volatile u32v;
+vector signed int volatile s32v;
+vector bool int volatile b32v;
+vector float volatile f32v;
+vector pixel volatile p16v;
+
+const vector unsigned char u8c_;
+const vector signed char s8c_;
+const vector bool char b8c_;
+const vector unsigned short u16c_;
+const vector signed short s16c_;
+const vector bool short b16c_;
+const vector unsigned int u32c_;
+const vector signed int s32c_;
+const vector bool int b32c_;
+const vector float f32c_;
+const vector pixel p16c_;
+
+volatile vector unsigned char u8v_;
+volatile vector signed char s8v_;
+volatile vector bool char b8v_;
+volatile vector unsigned short u16v_;
+volatile vector signed short s16v_;
+volatile vector bool short b16v_;
+volatile vector unsigned int u32v_;
+volatile vector signed int s32v_;
+volatile vector bool int b32v_;
+volatile vector float f32v_;
+volatile vector pixel p16v_;
diff --git a/gcc/testsuite/gcc.dg/vmx/1b-03.c b/gcc/testsuite/gcc.dg/vmx/1b-03.c
new file mode 100644
index 0000000000..2f8f816ba2
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/1b-03.c
@@ -0,0 +1,64 @@
+#include <altivec.h>
+int main()
+{
+ vector unsigned char u8;
+ vector signed char s8;
+ vector bool char b8;
+ vector unsigned short u16;
+ vector signed short s16;
+ vector bool short b16;
+ vector unsigned int u32;
+ vector signed int s32;
+ vector bool int b32;
+ vector float f32;
+ vector pixel p16;
+
+ vector unsigned char const u8c;
+ vector signed char const s8c;
+ vector bool char const b8c;
+ vector unsigned short const u16c;
+ vector signed short const s16c;
+ vector bool short const b16c;
+ vector unsigned int const u32c;
+ vector signed int const s32c;
+ vector bool int const b32c;
+ vector float const f32c;
+ vector pixel const p16c;
+
+ vector unsigned char volatile u8v;
+ vector signed char volatile s8v;
+ vector bool char volatile b8v;
+ vector unsigned short volatile u16v;
+ vector signed short volatile s16v;
+ vector bool short volatile b16v;
+ vector unsigned int volatile u32v;
+ vector signed int volatile s32v;
+ vector bool int volatile b32v;
+ vector float volatile f32v;
+ vector pixel volatile p16v;
+
+ const vector unsigned char u8c_;
+ const vector signed char s8c_;
+ const vector bool char b8c_;
+ const vector unsigned short u16c_;
+ const vector signed short s16c_;
+ const vector bool short b16c_;
+ const vector unsigned int u32c_;
+ const vector signed int s32c_;
+ const vector bool int b32c_;
+ const vector float f32c_;
+ const vector pixel p16c_;
+
+ volatile vector unsigned char u8v_;
+ volatile vector signed char s8v_;
+ volatile vector bool char b8v_;
+ volatile vector unsigned short u16v_;
+ volatile vector signed short s16v_;
+ volatile vector bool short b16v_;
+ volatile vector unsigned int u32v_;
+ volatile vector signed int s32v_;
+ volatile vector bool int b32v_;
+ volatile vector float f32v_;
+ volatile vector pixel p16v_;
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/1b-04.c b/gcc/testsuite/gcc.dg/vmx/1b-04.c
new file mode 100644
index 0000000000..5807ea335a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/1b-04.c
@@ -0,0 +1,7 @@
+#include <altivec.h>
+int main()
+{
+ vector unsigned char a,b;
+ b = (vector unsigned char)a;
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/1b-05.c b/gcc/testsuite/gcc.dg/vmx/1b-05.c
new file mode 100644
index 0000000000..63eb10b7c3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/1b-05.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector float _0 ;
+vector pixel _1 ;
+vector bool int _2 ;
+vector unsigned int _3 ;
+vector signed int _4 ;
+vector bool short _5 ;
+vector unsigned short _6 ;
+vector signed short _7 ;
+vector bool char _8 ;
+vector unsigned char _9 ;
+vector signed char _10 ;
diff --git a/gcc/testsuite/gcc.dg/vmx/1b-06-ansi.c b/gcc/testsuite/gcc.dg/vmx/1b-06-ansi.c
new file mode 100644
index 0000000000..780a4e6bc6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/1b-06-ansi.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-ansi -maltivec" } */
+
+#include <altivec.h>
+vector char bool _4 ;
+vector char unsigned _31 ;
+vector char signed _59 ;
+/* bool is permitted in the predefine method, as it is expanded
+ unconditionally to int. */
+bool _84 ;
+vector pixel _89 ;
+vector int bool _95 ;
+vector short bool _102 ;
+vector unsigned int _122 ;
+vector unsigned short _129 ;
+vector signed int _150 ;
+vector signed short _157 ;
+vector int bool _179 ;
+vector int short bool _186 ;
+vector unsigned int _206 ;
+vector int unsigned short _213 ;
+vector signed int _234 ;
+vector int signed short _241 ;
+vector float _339 ;
diff --git a/gcc/testsuite/gcc.dg/vmx/1b-06.c b/gcc/testsuite/gcc.dg/vmx/1b-06.c
new file mode 100644
index 0000000000..d25164c315
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/1b-06.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector char bool _4 ;
+vector char unsigned _31 ;
+vector char signed _59 ;
+vector pixel _89 ;
+vector int bool _95 ;
+vector short bool _102 ;
+vector unsigned int _122 ;
+vector unsigned short _129 ;
+vector signed int _150 ;
+vector signed short _157 ;
+vector int bool _179 ;
+vector int short bool _186 ;
+vector unsigned int _206 ;
+vector int unsigned short _213 ;
+vector signed int _234 ;
+vector int signed short _241 ;
+vector float _339 ;
diff --git a/gcc/testsuite/gcc.dg/vmx/1b-07-ansi.c b/gcc/testsuite/gcc.dg/vmx/1b-07-ansi.c
new file mode 100644
index 0000000000..cfc98639b5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/1b-07-ansi.c
@@ -0,0 +1,59 @@
+/* { dg-do compile } */
+/* { dg-options "-ansi -maltivec" } */
+
+#include <altivec.h>
+vector char bool _0 ;
+vector bool char _8 ;
+vector char unsigned _56 ;
+vector unsigned char _64 ;
+vector char signed _112 ;
+vector signed char _120 ;
+/* bool is permitted in the predefine method, as it is expanded
+ unconditionally to int. */
+bool _168 ;
+vector pixel _170 ;
+vector int bool _178 ;
+vector bool int _186 ;
+vector short bool _234 ;
+vector bool short _242 ;
+vector unsigned int _290 ;
+vector int unsigned _298 ;
+vector unsigned short _346 ;
+vector short unsigned _354 ;
+vector signed int _402 ;
+vector int signed _410 ;
+vector signed short _458 ;
+vector short signed _466 ;
+vector int bool _514 ;
+vector int bool _544 ;
+vector int bool _559 ;
+vector bool int _589 ;
+vector int short bool _874 ;
+vector int bool short _889 ;
+vector short int bool _904 ;
+vector short bool int _919 ;
+vector bool int short _934 ;
+vector bool short int _949 ;
+vector unsigned int _1234 ;
+vector int unsigned _1249 ;
+vector unsigned int _1279 ;
+vector int unsigned _1294 ;
+vector unsigned int _1309 ;
+vector int unsigned short _1594 ;
+vector int short unsigned _1609 ;
+vector unsigned int short _1624 ;
+vector unsigned short int _1639 ;
+vector short int unsigned _1654 ;
+vector short unsigned int _1669 ;
+vector signed int _1954 ;
+vector int signed _1969 ;
+vector signed int _1999 ;
+vector int signed _2014 ;
+vector signed int _2029 ;
+vector int signed short _2314 ;
+vector int short signed _2329 ;
+vector signed int short _2344 ;
+vector signed short int _2359 ;
+vector short int signed _2374 ;
+vector short signed int _2389 ;
+vector float _2674 ;
diff --git a/gcc/testsuite/gcc.dg/vmx/1b-07.c b/gcc/testsuite/gcc.dg/vmx/1b-07.c
new file mode 100644
index 0000000000..44bf4029f0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/1b-07.c
@@ -0,0 +1,54 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector char bool _0 ;
+vector bool char _8 ;
+vector char unsigned _56 ;
+vector unsigned char _64 ;
+vector char signed _112 ;
+vector signed char _120 ;
+vector pixel _170 ;
+vector int bool _178 ;
+vector bool int _186 ;
+vector short bool _234 ;
+vector bool short _242 ;
+vector unsigned int _290 ;
+vector int unsigned _298 ;
+vector unsigned short _346 ;
+vector short unsigned _354 ;
+vector signed int _402 ;
+vector int signed _410 ;
+vector signed short _458 ;
+vector short signed _466 ;
+vector int bool _514 ;
+vector int bool _544 ;
+vector int bool _559 ;
+vector bool int _589 ;
+vector int short bool _874 ;
+vector int bool short _889 ;
+vector short int bool _904 ;
+vector short bool int _919 ;
+vector bool int short _934 ;
+vector bool short int _949 ;
+vector unsigned int _1234 ;
+vector int unsigned _1249 ;
+vector unsigned int _1279 ;
+vector int unsigned _1294 ;
+vector unsigned int _1309 ;
+vector int unsigned short _1594 ;
+vector int short unsigned _1609 ;
+vector unsigned int short _1624 ;
+vector unsigned short int _1639 ;
+vector short int unsigned _1654 ;
+vector short unsigned int _1669 ;
+vector signed int _1954 ;
+vector int signed _1969 ;
+vector signed int _1999 ;
+vector int signed _2014 ;
+vector signed int _2029 ;
+vector int signed short _2314 ;
+vector int short signed _2329 ;
+vector signed int short _2344 ;
+vector signed short int _2359 ;
+vector short int signed _2374 ;
+vector short signed int _2389 ;
+vector float _2674 ;
diff --git a/gcc/testsuite/gcc.dg/vmx/1c-01.c b/gcc/testsuite/gcc.dg/vmx/1c-01.c
new file mode 100644
index 0000000000..974bda19ed
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/1c-01.c
@@ -0,0 +1,56 @@
+#include <stddef.h>
+#include "harness.h"
+
+/* Declare vector types. */
+vector unsigned char u8;
+vector signed char s8;
+vector bool char b8;
+vector unsigned short u16;
+vector signed short s16;
+vector bool short b16;
+vector unsigned int u32;
+vector signed int s32;
+vector bool int b32;
+vector float f32;
+vector pixel p16;
+
+vector unsigned char *u8c = &u8;
+vector signed char *s8c = &s8;
+vector bool char *b8c = &b8;
+vector unsigned short *u16c = &u16;
+vector signed short *s16c = &s16;
+vector bool short *b16c = &b16;
+vector unsigned int *u32c = &u32;
+vector signed int *s32c = &s32;
+vector bool int *b32c = &b32;
+vector float *f32c = &f32;
+vector pixel *p16c = &p16;
+
+static void test()
+{
+ check(((ptrdiff_t)u8c & 15) == 0, "alignof(u8)");
+ check(((ptrdiff_t)u8c & 15) == 0, "alignof(u8)");
+ check(((ptrdiff_t)s8c & 15) == 0, "alignof(s8)");
+ check(((ptrdiff_t)b8c & 15) == 0, "alignof(b8)");
+ check(((ptrdiff_t)u16c & 15) == 0, "alignof(u16)");
+ check(((ptrdiff_t)s16c & 15) == 0, "alignof(s16)");
+ check(((ptrdiff_t)b16c & 15) == 0, "alignof(b16)");
+ check(((ptrdiff_t)u32c & 15) == 0, "alignof(u32)");
+ check(((ptrdiff_t)s32c & 15) == 0, "alignof(s32)");
+ check(((ptrdiff_t)b32c & 15) == 0, "alignof(b32)");
+ check(((ptrdiff_t)f32c & 15) == 0, "alignof(f32)");
+ check(((ptrdiff_t)p16c & 15) == 0, "alignof(p16)");
+
+ check((ptrdiff_t)u8c == (ptrdiff_t)&u8, "u8c == &u8");
+ check((ptrdiff_t)u8c == (ptrdiff_t)&u8, "u8c == &u8");
+ check((ptrdiff_t)s8c == (ptrdiff_t)&s8, "s8c == &s8");
+ check((ptrdiff_t)b8c == (ptrdiff_t)&b8, "b8c == &b8");
+ check((ptrdiff_t)u16c == (ptrdiff_t)&u16, "u16c == &u16");
+ check((ptrdiff_t)s16c == (ptrdiff_t)&s16, "s16c == &s16");
+ check((ptrdiff_t)b16c == (ptrdiff_t)&b16, "b16c == &b16");
+ check((ptrdiff_t)u32c == (ptrdiff_t)&u32, "u32c == &u32");
+ check((ptrdiff_t)s32c == (ptrdiff_t)&s32, "s32c == &s32");
+ check((ptrdiff_t)b32c == (ptrdiff_t)&b32, "b32c == &b32");
+ check((ptrdiff_t)f32c == (ptrdiff_t)&f32, "f32c == &f32");
+ check((ptrdiff_t)p16c == (ptrdiff_t)&p16, "p16c == &p16");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/1c-02.c b/gcc/testsuite/gcc.dg/vmx/1c-02.c
new file mode 100644
index 0000000000..be6adf647f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/1c-02.c
@@ -0,0 +1,34 @@
+#include "harness.h"
+
+/* Vector types used in aggregates. */
+struct { char b; vector unsigned char a; char e; } u8;
+struct { char b; vector signed char a; char e; } s8;
+struct { char b; vector bool char a; char e; } b8;
+struct { char b; vector unsigned short a; char e; } u16;
+struct { char b; vector signed short a; char e; } s16;
+struct { char b; vector bool short a; char e; } b16;
+struct { char b; vector unsigned int a; char e; } u32;
+struct { char b; vector signed int a; char e; } s32;
+struct { char b; vector bool int a; char e; } b32;
+struct { char b; vector float a; char e; } f32;
+struct { char b; vector pixel a; char e; } p16;
+
+union { char b; vector unsigned char a; } u8u;
+union { char b; vector signed char a; } s8u;
+union { char b; vector bool char a; } b8u;
+union { char b; vector unsigned short a; } u16u;
+union { char b; vector signed short a; } s16u;
+union { char b; vector bool short a; } b16u;
+union { char b; vector unsigned int a; } u32u;
+union { char b; vector signed int a; } s32u;
+union { char b; vector bool int a; } b32u;
+union { char b; vector float a; } f32u;
+union { char b; vector pixel a; } p16u;
+
+static void test()
+{
+ check((long)&u8.a - (long)&u8 == 16, "u8.a");
+ check((long)&u8.e - (long)&u8 == 32, "u8.e");
+ check(sizeof(u8) == 48, "sizeof(u8)");
+ check(sizeof(u8u) == 16, "sizeof(u8u)");
+}
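
The offsets checked in 1c-02.c follow from ordinary layout rules: the 16-byte alignment of the vector member forces 15 bytes of padding after the leading char, and tail padding rounds the struct up to a multiple of 16 so arrays of it stay aligned. A sketch of the same arithmetic with offsetof, assuming -maltivec (the type name S is ours):

    #include <altivec.h>
    #include <assert.h>
    #include <stddef.h>

    struct S { char b; vector unsigned char a; char e; };

    int main(void)
    {
      assert(offsetof(struct S, a) == 16); /* padded to the vector's alignment */
      assert(offsetof(struct S, e) == 32); /* right after the 16-byte vector */
      assert(sizeof(struct S) == 48);      /* tail padding to a 16-byte multiple */
      return 0;
    }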
diff --git a/gcc/testsuite/gcc.dg/vmx/3a-01.c b/gcc/testsuite/gcc.dg/vmx/3a-01.c
new file mode 100644
index 0000000000..86e514d4a2
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3a-01.c
@@ -0,0 +1,16 @@
+#include "harness.h"
+/* Simple use of a non-overloaded generic vector intrinsic. */
+
+static vector unsigned int
+f(vector unsigned int a, vector unsigned int b)
+{
+ return vec_addc(a,b);
+}
+
+static void test()
+{
+ check(vec_all_eq(f(((vector unsigned int){1,1,3,2}),
+ ((vector unsigned int){-1,-2,3,-4})),
+ ((vector unsigned int){1,0,0,0})),
+ "f");
+}
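
vec_addc yields the element-wise carry-out of an unsigned 32-bit add, so {1,1,3,2} + {0xffffffff,0xfffffffe,3,0xfffffffc} (the literals above, reinterpreted as unsigned) carries only in lane 0. A scalar model of one lane, with a helper name of our choosing:

    #include <stdint.h>

    /* One vec_addc lane: 1 when a + b overflows 32 bits, else 0. */
    static uint32_t addc_lane(uint32_t a, uint32_t b)
    {
      return (uint32_t)(((uint64_t)a + b) >> 32);
    }

    int main(void)
    {
      return addc_lane(1, 0xffffffffu) == 1 ? 0 : 1; /* lane 0 above */
    }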
diff --git a/gcc/testsuite/gcc.dg/vmx/3a-01a.c b/gcc/testsuite/gcc.dg/vmx/3a-01a.c
new file mode 100644
index 0000000000..7619d1185c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3a-01a.c
@@ -0,0 +1,17 @@
+#include "harness.h"
+
+/* Simple use of a non-overloaded specific vector intrinsic. */
+
+vector unsigned int
+f(vector unsigned int a, vector unsigned int b)
+{
+ return vec_vaddcuw(a,b);
+}
+
+void test()
+{
+ check(vec_all_eq(f(((vector unsigned int){1,1,3,2}),
+ ((vector unsigned int){-1,-2,3,-4})),
+ ((vector unsigned int){1,0,0,0})),
+ "f");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3a-01m.c b/gcc/testsuite/gcc.dg/vmx/3a-01m.c
new file mode 100644
index 0000000000..d57287007d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3a-01m.c
@@ -0,0 +1,17 @@
+#include "harness.h"
+
+/* Simple use of a non-overloaded specific vector intrinsic. */
+
+vector unsigned int
+f(vector unsigned int a, vector unsigned int b)
+{
+ return vec_vaddcuw(a,b);
+}
+
+static void test()
+{
+ check(vec_all_eq(f(((vector unsigned int){1,1,3,2}),
+ ((vector unsigned int){-1,-2,3,-4})),
+ ((vector unsigned int){1,0,0,0})),
+ "f");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3a-03.c b/gcc/testsuite/gcc.dg/vmx/3a-03.c
new file mode 100644
index 0000000000..5f46ee6f45
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3a-03.c
@@ -0,0 +1,18 @@
+#include "harness.h"
+
+/* Small expression involving non-overloaded generic vector intrinsics. */
+
+vector float
+f(vector float a, vector float b, vector float c)
+{
+ return vec_nmsub(a, vec_re(b), vec_nmsub(b, c, vec_expte(a)));
+}
+
+static void test()
+{
+ check(vec_all_eq(f(((vector float){2,3,5,7}),
+ ((vector float){11,13,17,19}),
+ ((vector float){23,29,31,37})),
+ ((vector float){-249.181808, -369.230774, -495.294098, -575.368408})),
+ "f");
+}
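
The expected literals in 3a-03.c come from composing vec_expte(a) (an estimate of 2**a), vec_re(b) (an estimate of 1/b) and vec_nmsub(x,y,z) = -(x*y - z); exact equality is only meaningful because the estimate instructions are deterministic on a given implementation. A scalar model of one lane, with exact math standing in for the estimates:

    #include <math.h>

    /* One lane of f(): vec_nmsub(a, vec_re(b), vec_nmsub(b, c, vec_expte(a))),
       modelling the estimates as exact 2**a and 1/b. */
    static float f_lane(float a, float b, float c)
    {
      float inner = -(b * c - exp2f(a));  /* vec_nmsub(b, c, vec_expte(a)) */
      return -(a * (1.0f / b) - inner);   /* vec_nmsub(a, vec_re(b), inner) */
    }
    /* f_lane(2, 11, 23) is about -249.1818, the first expected element. */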
diff --git a/gcc/testsuite/gcc.dg/vmx/3a-03m.c b/gcc/testsuite/gcc.dg/vmx/3a-03m.c
new file mode 100644
index 0000000000..68feecee71
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3a-03m.c
@@ -0,0 +1,18 @@
+#include "harness.h"
+
+/* Small expression involving non-overloaded specific vector intrinsics. */
+
+vector float
+f(vector float a, vector float b, vector float c)
+{
+ return vec_nmsub(a, vec_vrefp(b), vec_nmsub(b, c, vec_vexptefp(a)));
+}
+
+static void test()
+{
+ check(vec_all_eq(f(((vector float){2,3,5,7}),
+ ((vector float){11,13,17,19}),
+ ((vector float){23,29,31,37})),
+ ((vector float){-249.181808, -369.230774, -495.294098, -575.368408})),
+ "f");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3a-04.c b/gcc/testsuite/gcc.dg/vmx/3a-04.c
new file mode 100644
index 0000000000..a04497bede
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3a-04.c
@@ -0,0 +1,22 @@
+#include "harness.h"
+
+/* Small expression involving non-overloaded vector intrinsics. */
+
+vector float
+f(vector float a, vector float b, vector float c)
+{
+ return vec_vmaddfp(a, vec_re(b), vec_vmaxfp(c, vec_expte(a)));
+}
+
+static void test()
+{
+ check(vec_all_gt(f(((vector float){2,3,5,7}),
+ ((vector float){11,13,17,19}),
+ ((vector float){23,29,31,37})),
+ ((vector float){23.18, 29.23, 32.29, 128.36}))
+ && vec_all_lt(f(((vector float){2,3,5,7}),
+ ((vector float){11,13,17,19}),
+ ((vector float){23,29,31,37})),
+ ((vector float){23.19, 29.24, 32.30, 128.37})),
+ "f");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3a-04m.c b/gcc/testsuite/gcc.dg/vmx/3a-04m.c
new file mode 100644
index 0000000000..b6e273bcc9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3a-04m.c
@@ -0,0 +1,22 @@
+#include "harness.h"
+
+/* Small expression involving non-overloaded specific vector intrinsics. */
+
+vector float
+f(vector float a, vector float b, vector float c)
+{
+ return vec_vmaddfp(a, vec_vrefp(b), vec_vmaxfp(c, vec_vexptefp(a)));
+}
+
+static void test()
+{
+ check(vec_all_gt(f(((vector float){2,3,5,7}),
+ ((vector float){11,13,17,19}),
+ ((vector float){23,29,31,37})),
+ ((vector float){23.18, 29.23, 32.29, 128.36}))
+ && vec_all_lt(f(((vector float){2,3,5,7}),
+ ((vector float){11,13,17,19}),
+ ((vector float){23,29,31,37})),
+ ((vector float){23.19, 29.24, 32.30, 128.37})),
+ "f");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3a-05.c b/gcc/testsuite/gcc.dg/vmx/3a-05.c
new file mode 100644
index 0000000000..8d6ba5eab4
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3a-05.c
@@ -0,0 +1,26 @@
+#include "harness.h"
+
+/* Small expression involving non-overloaded specific vector intrinsics. */
+
+vector float
+f(vector float a, vector float b, vector float c)
+{
+ vector float q = vec_expte(a);
+ vector float r = vec_vsubfp(c, q);
+ vector float s = vec_re(b);
+ vector float t = vec_nmsub(s, c, r);
+ return t;
+}
+
+static void test()
+{
+ check(vec_all_gt(f(((vector float){2,3,5,7}),
+ ((vector float){11,13,17,19}),
+ ((vector float){23,29,31,37})),
+ ((vector float){16.90, 18.76, -2.83, -92.95}))
+ && vec_all_lt(f(((vector float){2,3,5,7}),
+ ((vector float){11,13,17,19}),
+ ((vector float){23,29,31,37})),
+ ((vector float){16.91, 18.77, -2.82, -92.94})),
+ "f");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3a-06.c b/gcc/testsuite/gcc.dg/vmx/3a-06.c
new file mode 100644
index 0000000000..6f27b3860a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3a-06.c
@@ -0,0 +1,15 @@
+#include "harness.h"
+
+vector unsigned int
+f(vector unsigned int a, vector unsigned int b)
+{
+ return vec_addc(vec_addc(a,b),b);
+}
+
+static void test()
+{
+ check(vec_all_eq(f(((vector unsigned int){2,4,6,8}),
+ ((vector unsigned int){-1,-2,-3,-4})),
+ ((vector unsigned int){1,0,0,0})),
+ "f");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3a-06m.c b/gcc/testsuite/gcc.dg/vmx/3a-06m.c
new file mode 100644
index 0000000000..e616f9aef2
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3a-06m.c
@@ -0,0 +1,15 @@
+#include "harness.h"
+
+vector unsigned int
+f(vector unsigned int a, vector unsigned int b)
+{
+ return vec_vaddcuw(vec_vaddcuw(a,b),b);
+}
+
+static void test()
+{
+ check(vec_all_eq(f(((vector unsigned int){2,4,6,8}),
+ ((vector unsigned int){-1,-2,-3,-4})),
+ ((vector unsigned int){1,0,0,0})),
+ "f");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3a-07.c b/gcc/testsuite/gcc.dg/vmx/3a-07.c
new file mode 100644
index 0000000000..197fd23c86
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3a-07.c
@@ -0,0 +1,16 @@
+#include "harness.h"
+
+static vector unsigned char
+zero()
+{
+ /* MCC allocates a stack slot for and loads an uninitialized local
+ variable. */
+ vector unsigned char a;
+ return vec_sub(a,a);
+}
+
+static void test()
+{
+ static vector unsigned char zerov;
+ check(vec_all_eq(zero(), zerov), "zero");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3b-01.c b/gcc/testsuite/gcc.dg/vmx/3b-01.c
new file mode 100644
index 0000000000..e8feec481e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3b-01.c
@@ -0,0 +1,18 @@
+#include "harness.h"
+
+/* Simple use of an overloaded generic vector intrinsic. */
+
+vector unsigned int
+f(vector unsigned int a, vector unsigned int b)
+{
+ return vec_subs(a,b);
+}
+
+static void test()
+{
+ static vector unsigned int zero;
+ check(vec_all_eq(f(((vector unsigned int){2,4,6,8}),
+ ((vector unsigned int){2,4,6,8})),
+ zero),
+ "f");
+}
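
vec_subs is the saturating subtract: unsigned lanes clamp at 0 rather than wrapping, so equal inputs give an all-zero vector. A scalar model of one unsigned 32-bit lane:

    #include <stdint.h>

    /* One vec_subs lane for vector unsigned int: clamp at 0, no wraparound. */
    static uint32_t subs_lane(uint32_t a, uint32_t b)
    {
      return a > b ? a - b : 0;
    }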
diff --git a/gcc/testsuite/gcc.dg/vmx/3b-02.c b/gcc/testsuite/gcc.dg/vmx/3b-02.c
new file mode 100644
index 0000000000..66693e0ff7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3b-02.c
@@ -0,0 +1,16 @@
+#include "harness.h"
+
+vector unsigned char
+f(vector unsigned char a, vector unsigned char b)
+{
+ return vec_vsububs(a,b);
+}
+
+static void test()
+{
+ static vector unsigned char zero;
+ check(vec_all_eq(f(((vector unsigned char){2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2}),
+ ((vector unsigned char){2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2})),
+ zero),
+ "f");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3b-10.c b/gcc/testsuite/gcc.dg/vmx/3b-10.c
new file mode 100644
index 0000000000..5f8fb3adfa
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3b-10.c
@@ -0,0 +1,21 @@
+#include "harness.h"
+
+typedef vector unsigned int x;
+
+x f (x a)
+{
+ return vec_addc(a,a);
+}
+
+void g (int b)
+{
+ vec_dst(&b, 3, 3);
+ vec_dst(&b, 1, 1);
+}
+
+static void test()
+{
+ check(vec_all_eq(f(((vector unsigned int){0x80000000,0x7fffffff,3,4})),
+ ((vector unsigned int){1,0,0,0})),
+ "f");
+}
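
vec_dst(addr, control, tag) in g() is a software prefetch hint: tag (0-3) selects one of four data streams, and the control word packs the block size, block count and stride. A hint changes no architectural state, which is why the test only checks f(). A sketch of packing a control word, assuming the encoding described in the AltiVec PIM (block size in 16-byte units in bits 24..28, block count in bits 16..23, signed byte stride in the low 16 bits); the helper name is ours:

    /* Pack a vec_dst/vec_dstt control word (per the AltiVec PIM encoding). */
    static int dst_control(int block_size, int block_count, int stride)
    {
      return (block_size << 24) | (block_count << 16) | (stride & 0xffff);
    }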
diff --git a/gcc/testsuite/gcc.dg/vmx/3b-13.c b/gcc/testsuite/gcc.dg/vmx/3b-13.c
new file mode 100644
index 0000000000..146f737aeb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3b-13.c
@@ -0,0 +1,15 @@
+#include "harness.h"
+
+vector signed int
+f(vector float a, vector signed int b)
+{
+ return vec_splat(vec_cts(vec_ctf(vec_ctu(a, 31),0),9),30);
+}
+
+static void test()
+{
+ check(vec_all_eq(f(((vector float){1,2,3,4}),
+ ((vector signed int){2,4,6,8})),
+ ((vector signed int){2147483647, 2147483647, 2147483647, 2147483647})),
+ "f");
+}
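
Every lane in 3b-13.c saturates: vec_ctu(a,31) converts a*2**31 to unsigned with saturation (already 0xffffffff for a >= 2), vec_ctf(...,0) turns that back into a large float, vec_cts(...,9) multiplies by 2**9 and saturates the signed conversion at 2147483647, and vec_splat(...,30) replicates element 30 mod 4 = 2. A scalar model of one lane:

    #include <stdint.h>

    /* One lane: saturating float->u32 at scale 2**31, back to float,
       then saturating float->s32 at scale 2**9. */
    static int32_t f_lane(float a)
    {
      double u = (double)a * 2147483648.0;             /* vec_ctu(a, 31) */
      uint32_t ctu = u >= 4294967295.0 ? 4294967295u
                   : u <= 0.0          ? 0 : (uint32_t)u;
      double s = (double)ctu * 512.0;         /* vec_cts(vec_ctf(ctu, 0), 9) */
      return s >= 2147483647.0 ? 2147483647 : (int32_t)s;
    }
    /* For a in {1,2,3,4}, s always exceeds 2**31 - 1, hence 2147483647. */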
diff --git a/gcc/testsuite/gcc.dg/vmx/3b-14.c b/gcc/testsuite/gcc.dg/vmx/3b-14.c
new file mode 100644
index 0000000000..02b2d901eb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3b-14.c
@@ -0,0 +1,29 @@
+#include "harness.h"
+
+static vector bool char x(void);
+static void g(void);
+
+static vector bool char
+f (void)
+{
+ vector bool char a = x();
+ g();
+ return a;
+}
+
+static vector bool char
+x (void)
+{
+ static vector bool char zero;
+ return zero;
+}
+
+static void g ()
+{
+}
+
+static void test()
+{
+ static vector bool char zero;
+ check(vec_all_eq(f(), zero), "f");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3b-15.c b/gcc/testsuite/gcc.dg/vmx/3b-15.c
new file mode 100644
index 0000000000..ec9cf2c5a0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3b-15.c
@@ -0,0 +1,19 @@
+#include "harness.h"
+
+vector unsigned char
+f (vector unsigned char a, vector unsigned char b, vector unsigned char c)
+{
+ return vec_perm(a,b,c);
+}
+
+static void test()
+{
+ check(vec_all_eq(f(((vector unsigned char){0,1,2,3,4,5,6,7,
+ 8,9,10,11,12,13,14,15}),
+ ((vector unsigned char){70,71,72,73,74,75,76,77,
+ 78,79,80,81,82,83,84,85}),
+ ((vector unsigned char){0x1,0x14,0x18,0x10,0x16,0x15,0x19,0x1a,
+ 0x1c,0x1c,0x1c,0x12,0x8,0x1d,0x1b,0xe})),
+ ((vector unsigned char){1,74,78,70,76,75,79,80,82,82,82,72,8,83,81,14})),
+ "f");
+}
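
vec_perm treats a and b as one 32-byte table and uses the low five bits of each byte of c as an index into it, so c[0] = 0x1 selects a[1] = 1 and c[1] = 0x14 selects b[4] = 74, matching the expected vector element by element. A scalar model:

    /* vec_perm, one byte at a time: index the 32-byte concatenation of a
       and b; only the low 5 bits of each control byte matter. */
    static void perm(const unsigned char a[16], const unsigned char b[16],
                     const unsigned char c[16], unsigned char out[16])
    {
      int i;
      for (i = 0; i < 16; i++) {
        unsigned idx = c[i] & 31;
        out[i] = idx < 16 ? a[idx] : b[idx - 16];
      }
    }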
diff --git a/gcc/testsuite/gcc.dg/vmx/3c-01.c b/gcc/testsuite/gcc.dg/vmx/3c-01.c
new file mode 100644
index 0000000000..c6da229f4b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3c-01.c
@@ -0,0 +1,86 @@
+#include "harness.h"
+
+vector unsigned char u8;
+vector signed char s8;
+vector bool char b8;
+vector unsigned short u16;
+vector signed short s16;
+vector bool short b16;
+vector unsigned int u32;
+vector signed int s32;
+vector bool int b32;
+vector float f32;
+vector pixel p16;
+
+static void g(void);
+
+static void f(void *p)
+{
+ u8 = vec_ld(16, (unsigned char *)p);
+ u16 = vec_ld(16, (unsigned short*)p);
+ u32 = vec_ld(16, (unsigned int*)p);
+ s8 = vec_ld(16, (signed char *)p);
+ s16 = vec_ld(16, (short*)p);
+ s32 = vec_ld(16, (int*)p);
+ g();
+ u8 = vec_ld(16, (vector unsigned char*)p);
+ s8 = vec_ld(16, (vector signed char*)p);
+ b8 = vec_ld(16, (vector bool char*)p);
+ g();
+ u16 = vec_ld(16, (vector unsigned short*)p);
+ s16 = vec_ld(16, (vector signed short*)p);
+ b16 = vec_ld(16, (vector bool short*)p);
+ g();
+ u32 = vec_ld(16, (vector unsigned int*)p);
+ s32 = vec_ld(16, (vector signed int*)p);
+ b32 = vec_ld(16, (vector bool int*)p);
+ f32 = vec_ld(16, (vector float*)p);
+ p16 = vec_ld(16, (vector pixel*)p);
+ g();
+ u8 = vec_lde(16, (unsigned char *)p);
+ u16 = vec_lde(16, (unsigned short*)p);
+ u32 = vec_lde(16, (unsigned int*)p);
+ s8 = vec_lde(16, (signed char *)p);
+ s16 = vec_lde(16, (short*)p);
+ s32 = vec_lde(16, (int*)p);
+ f32 = vec_ldl(16, (vector float*)p);
+ p16 = vec_ldl(16, (vector pixel*)p);
+ g();
+ u8 = vec_ldl(16, (vector unsigned char*)p);
+ s8 = vec_ldl(16, (vector signed char*)p);
+ b8 = vec_ldl(16, (vector bool char*)p);
+ g();
+ u16 = vec_ldl(16, (vector unsigned short*)p);
+ s16 = vec_ldl(16, (vector signed short*)p);
+ b16 = vec_ldl(16, (vector bool short*)p);
+ g();
+ u32 = vec_ldl(16, (vector unsigned int*)p);
+ s32 = vec_ldl(16, (vector signed int*)p);
+ b32 = vec_ldl(16, (vector bool int*)p);
+ f32 = vec_ldl(16, (vector float*)p);
+ p16 = vec_ldl(16, (vector pixel*)p);
+}
+
+static void g ()
+{
+}
+
+static void test()
+{
+ static vector unsigned int value = {1,-2,3,-4};
+ static vector unsigned int buffer[2];
+#define chek(v, s) check(vec_all_eq(v, value), s)
+ buffer[1] = value;
+ f((void *)buffer);
+ chek((vector unsigned int) u8, "u8");
+ chek((vector unsigned int) s8, "s8");
+ chek((vector unsigned int) b8, "b8");
+ chek((vector unsigned int) u16, "u16");
+ chek((vector unsigned int) s16, "s16");
+ chek((vector unsigned int) b16, "b16");
+ chek((vector unsigned int) u32, "u32");
+ chek((vector unsigned int) s32, "s32");
+ chek((vector unsigned int) b32, "b32");
+ chek((vector unsigned int) f32, "f32");
+ chek((vector unsigned int) p16, "p16");
+}
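
The loads in 3c-01.c all return value because vec_ld and vec_ldl fetch the 16-byte block containing (char *)p + offset with the low four address bits cleared: buffer is 16-byte aligned, so offset 16 lands exactly on buffer[1]. (vec_lde loads only the addressed element, leaving the other lanes undefined, and vec_ldl additionally marks the cache line least-recently-used.) A scalar sketch of the effective-address rule, helper name ours:

    #include <stdint.h>

    /* Effective address used by vec_ld/vec_ldl/vec_lde: truncate
       (char *)p + offset down to a 16-byte boundary. */
    static const void *vec_ld_addr(long offset, const void *p)
    {
      return (const void *)(((uintptr_t)p + (uintptr_t)offset) & ~(uintptr_t)15);
    }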
diff --git a/gcc/testsuite/gcc.dg/vmx/3c-01a.c b/gcc/testsuite/gcc.dg/vmx/3c-01a.c
new file mode 100644
index 0000000000..2499ca6659
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3c-01a.c
@@ -0,0 +1,1450 @@
+/* { dg-do compile } */
+#include <altivec.h>
+typedef const volatile unsigned int _1;
+typedef const unsigned int _2;
+typedef volatile unsigned int _3;
+typedef unsigned int _4;
+typedef const volatile vector bool short _5;
+typedef const vector bool short _6;
+typedef volatile vector bool short _7;
+typedef vector bool short _8;
+typedef const volatile signed short _9;
+typedef const signed short _10;
+typedef volatile signed short _11;
+typedef signed short _12;
+typedef const volatile unsigned _13;
+typedef const unsigned _14;
+typedef volatile unsigned _15;
+typedef unsigned _16;
+typedef const volatile signed short int _17;
+typedef const signed short int _18;
+typedef volatile signed short int _19;
+typedef signed short int _20;
+typedef const volatile unsigned short int _21;
+typedef const unsigned short int _22;
+typedef volatile unsigned short int _23;
+typedef unsigned short int _24;
+typedef const volatile vector pixel _25;
+typedef const vector pixel _26;
+typedef volatile vector pixel _27;
+typedef vector pixel _28;
+typedef const volatile vector bool int _29;
+typedef const vector bool int _30;
+typedef volatile vector bool int _31;
+typedef vector bool int _32;
+typedef const volatile vector signed char _33;
+typedef const vector signed char _34;
+typedef volatile vector signed char _35;
+typedef vector signed char _36;
+typedef const volatile unsigned _37;
+typedef const unsigned _38;
+typedef volatile unsigned _39;
+typedef unsigned _40;
+typedef const volatile signed int _41;
+typedef const signed int _42;
+typedef volatile signed int _43;
+typedef signed int _44;
+typedef const volatile vector float _45;
+typedef const vector float _46;
+typedef volatile vector float _47;
+typedef vector float _48;
+typedef const volatile vector signed short _49;
+typedef const vector signed short _50;
+typedef volatile vector signed short _51;
+typedef vector signed short _52;
+typedef const volatile unsigned char _53;
+typedef const unsigned char _54;
+typedef volatile unsigned char _55;
+typedef unsigned char _56;
+typedef const volatile signed int _57;
+typedef const signed int _58;
+typedef volatile signed int _59;
+typedef signed int _60;
+typedef const volatile unsigned int _61;
+typedef const unsigned int _62;
+typedef volatile unsigned int _63;
+typedef unsigned int _64;
+typedef const volatile unsigned short _65;
+typedef const unsigned short _66;
+typedef volatile unsigned short _67;
+typedef unsigned short _68;
+typedef const volatile short _69;
+typedef const short _70;
+typedef volatile short _71;
+typedef short _72;
+typedef const volatile int _73;
+typedef const int _74;
+typedef volatile int _75;
+typedef int _76;
+typedef const volatile vector unsigned short _77;
+typedef const vector unsigned short _78;
+typedef volatile vector unsigned short _79;
+typedef vector unsigned short _80;
+typedef const volatile vector bool char _81;
+typedef const vector bool char _82;
+typedef volatile vector bool char _83;
+typedef vector bool char _84;
+typedef const volatile signed _85;
+typedef const signed _86;
+typedef volatile signed _87;
+typedef signed _88;
+typedef const volatile vector signed int _89;
+typedef const vector signed int _90;
+typedef volatile vector signed int _91;
+typedef vector signed int _92;
+typedef const volatile vector unsigned int _93;
+typedef const vector unsigned int _94;
+typedef volatile vector unsigned int _95;
+typedef vector unsigned int _96;
+typedef const volatile signed _97;
+typedef const signed _98;
+typedef volatile signed _99;
+typedef signed _100;
+typedef const volatile short int _101;
+typedef const short int _102;
+typedef volatile short int _103;
+typedef short int _104;
+typedef const volatile int _105;
+typedef const int _106;
+typedef volatile int _107;
+typedef int _108;
+typedef const volatile int _109;
+typedef const int _110;
+typedef volatile int _111;
+typedef int _112;
+typedef const volatile vector unsigned char _113;
+typedef const vector unsigned char _114;
+typedef volatile vector unsigned char _115;
+typedef vector unsigned char _116;
+typedef const volatile signed char _117;
+typedef const signed char _118;
+typedef volatile signed char _119;
+typedef signed char _120;
+typedef const volatile float _121;
+typedef const float _122;
+typedef volatile float _123;
+typedef float _124;
+
+vector unsigned char u8;
+vector signed char s8;
+vector bool char b8;
+vector unsigned short u16;
+vector signed short s16;
+vector bool short b16;
+vector unsigned int u32;
+vector signed int s32;
+vector bool int b32;
+vector float f32;
+vector pixel p16;
+
+void f(void *p)
+{
+ u8 = vec_lvsl(1,(const volatile unsigned int *)p);
+ u8 = vec_lvsl(1,(_1 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned int *)p);
+ u8 = vec_lvsr(1,(_1 *)p);
+ u8 = vec_lvsl(1,(const unsigned int *)p);
+ u8 = vec_lvsl(1,(_2 *)p);
+ u8 = vec_lvsr(1,(const unsigned int *)p);
+ u8 = vec_lvsr(1,(_2 *)p);
+ u32 = vec_ld(1,(const unsigned int *)p);
+ u32 = vec_ld(1,(_2 *)p);
+ u32 = vec_lde(1,(const unsigned int *)p);
+ u32 = vec_lde(1,(_2 *)p);
+ u32 = vec_ldl(1,(const unsigned int *)p);
+ u32 = vec_ldl(1,(_2 *)p);
+ vec_dst((const unsigned int *)p,1,1);
+ vec_dstst((const unsigned int *)p,1,1);
+ vec_dststt((const unsigned int *)p,1,1);
+ vec_dstt((const unsigned int *)p,1,1);
+ vec_dst((_2 *)p,1,1);
+ vec_dstst((_2 *)p,1,1);
+ vec_dststt((_2 *)p,1,1);
+ vec_dstt((_2 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned int *)p);
+ u8 = vec_lvsl(1,(_3 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned int *)p);
+ u8 = vec_lvsr(1,(_3 *)p);
+ u8 = vec_lvsl(1,( unsigned int *)p);
+ u8 = vec_lvsl(1,(_4 *)p);
+ u8 = vec_lvsr(1,( unsigned int *)p);
+ u8 = vec_lvsr(1,(_4 *)p);
+ u32 = vec_ld(1,( unsigned int *)p);
+ u32 = vec_ld(1,(_4 *)p);
+ u32 = vec_lde(1,( unsigned int *)p);
+ u32 = vec_lde(1,(_4 *)p);
+ u32 = vec_ldl(1,( unsigned int *)p);
+ u32 = vec_ldl(1,(_4 *)p);
+ vec_dst(( unsigned int *)p,1,1);
+ vec_dstst(( unsigned int *)p,1,1);
+ vec_dststt(( unsigned int *)p,1,1);
+ vec_dstt(( unsigned int *)p,1,1);
+ vec_dst((_4 *)p,1,1);
+ vec_dstst((_4 *)p,1,1);
+ vec_dststt((_4 *)p,1,1);
+ vec_dstt((_4 *)p,1,1);
+ vec_st(u32,1,( unsigned int *)p);
+ vec_st(u32,1,(_4 *)p);
+ vec_ste(u32,1,( unsigned int *)p);
+ vec_ste(u32,1,(_4 *)p);
+ vec_stl(u32,1,( unsigned int *)p);
+ vec_stl(u32,1,(_4 *)p);
+ b16 = vec_ld(1,(const vector bool short *)p);
+ b16 = vec_ld(1,(_6 *)p);
+ b16 = vec_ldl(1,(const vector bool short *)p);
+ b16 = vec_ldl(1,(_6 *)p);
+ vec_dst((const vector bool short *)p,1,1);
+ vec_dstst((const vector bool short *)p,1,1);
+ vec_dststt((const vector bool short *)p,1,1);
+ vec_dstt((const vector bool short *)p,1,1);
+ vec_dst((_6 *)p,1,1);
+ vec_dstst((_6 *)p,1,1);
+ vec_dststt((_6 *)p,1,1);
+ vec_dstt((_6 *)p,1,1);
+ b16 = vec_ld(1,( vector bool short *)p);
+ b16 = vec_ld(1,(_8 *)p);
+ b16 = vec_ldl(1,( vector bool short *)p);
+ b16 = vec_ldl(1,(_8 *)p);
+ vec_dst(( vector bool short *)p,1,1);
+ vec_dstst(( vector bool short *)p,1,1);
+ vec_dststt(( vector bool short *)p,1,1);
+ vec_dstt(( vector bool short *)p,1,1);
+ vec_dst((_8 *)p,1,1);
+ vec_dstst((_8 *)p,1,1);
+ vec_dststt((_8 *)p,1,1);
+ vec_dstt((_8 *)p,1,1);
+ vec_st(b16,1,( vector bool short *)p);
+ vec_st(b16,1,(_8 *)p);
+ vec_stl(b16,1,( vector bool short *)p);
+ vec_stl(b16,1,(_8 *)p);
+ u8 = vec_lvsl(1,(const volatile signed short *)p);
+ u8 = vec_lvsl(1,(_9 *)p);
+ u8 = vec_lvsr(1,(const volatile signed short *)p);
+ u8 = vec_lvsr(1,(_9 *)p);
+ u8 = vec_lvsl(1,(const signed short *)p);
+ u8 = vec_lvsl(1,(_10 *)p);
+ u8 = vec_lvsr(1,(const signed short *)p);
+ u8 = vec_lvsr(1,(_10 *)p);
+ s16 = vec_ld(1,(const signed short *)p);
+ s16 = vec_ld(1,(_10 *)p);
+ s16 = vec_lde(1,(const signed short *)p);
+ s16 = vec_lde(1,(_10 *)p);
+ s16 = vec_ldl(1,(const signed short *)p);
+ s16 = vec_ldl(1,(_10 *)p);
+ vec_dst((const signed short *)p,1,1);
+ vec_dstst((const signed short *)p,1,1);
+ vec_dststt((const signed short *)p,1,1);
+ vec_dstt((const signed short *)p,1,1);
+ vec_dst((_10 *)p,1,1);
+ vec_dstst((_10 *)p,1,1);
+ vec_dststt((_10 *)p,1,1);
+ vec_dstt((_10 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile signed short *)p);
+ u8 = vec_lvsl(1,(_11 *)p);
+ u8 = vec_lvsr(1,( volatile signed short *)p);
+ u8 = vec_lvsr(1,(_11 *)p);
+ u8 = vec_lvsl(1,( signed short *)p);
+ u8 = vec_lvsl(1,(_12 *)p);
+ u8 = vec_lvsr(1,( signed short *)p);
+ u8 = vec_lvsr(1,(_12 *)p);
+ s16 = vec_ld(1,( signed short *)p);
+ s16 = vec_ld(1,(_12 *)p);
+ s16 = vec_lde(1,( signed short *)p);
+ s16 = vec_lde(1,(_12 *)p);
+ s16 = vec_ldl(1,( signed short *)p);
+ s16 = vec_ldl(1,(_12 *)p);
+ vec_dst(( signed short *)p,1,1);
+ vec_dstst(( signed short *)p,1,1);
+ vec_dststt(( signed short *)p,1,1);
+ vec_dstt(( signed short *)p,1,1);
+ vec_dst((_12 *)p,1,1);
+ vec_dstst((_12 *)p,1,1);
+ vec_dststt((_12 *)p,1,1);
+ vec_dstt((_12 *)p,1,1);
+ vec_st(s16,1,( signed short *)p);
+ vec_st(s16,1,(_12 *)p);
+ vec_ste(s16,1,( signed short *)p);
+ vec_ste(s16,1,(_12 *)p);
+ vec_stl(s16,1,( signed short *)p);
+ vec_stl(s16,1,(_12 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned *)p);
+ u8 = vec_lvsl(1,(_13 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned *)p);
+ u8 = vec_lvsr(1,(_13 *)p);
+ u8 = vec_lvsl(1,(const unsigned *)p);
+ u8 = vec_lvsl(1,(_14 *)p);
+ u8 = vec_lvsr(1,(const unsigned *)p);
+ u8 = vec_lvsr(1,(_14 *)p);
+ u32 = vec_ld(1,(const unsigned *)p);
+ u32 = vec_ld(1,(_14 *)p);
+ u32 = vec_lde(1,(const unsigned *)p);
+ u32 = vec_lde(1,(_14 *)p);
+ u32 = vec_ldl(1,(const unsigned *)p);
+ u32 = vec_ldl(1,(_14 *)p);
+ vec_dst((const unsigned *)p,1,1);
+ vec_dstst((const unsigned *)p,1,1);
+ vec_dststt((const unsigned *)p,1,1);
+ vec_dstt((const unsigned *)p,1,1);
+ vec_dst((_14 *)p,1,1);
+ vec_dstst((_14 *)p,1,1);
+ vec_dststt((_14 *)p,1,1);
+ vec_dstt((_14 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned *)p);
+ u8 = vec_lvsl(1,(_15 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned *)p);
+ u8 = vec_lvsr(1,(_15 *)p);
+ u8 = vec_lvsl(1,( unsigned *)p);
+ u8 = vec_lvsl(1,(_16 *)p);
+ u8 = vec_lvsr(1,( unsigned *)p);
+ u8 = vec_lvsr(1,(_16 *)p);
+ u32 = vec_ld(1,( unsigned *)p);
+ u32 = vec_ld(1,(_16 *)p);
+ u32 = vec_lde(1,( unsigned *)p);
+ u32 = vec_lde(1,(_16 *)p);
+ u32 = vec_ldl(1,( unsigned *)p);
+ u32 = vec_ldl(1,(_16 *)p);
+ vec_dst(( unsigned *)p,1,1);
+ vec_dstst(( unsigned *)p,1,1);
+ vec_dststt(( unsigned *)p,1,1);
+ vec_dstt(( unsigned *)p,1,1);
+ vec_dst((_16 *)p,1,1);
+ vec_dstst((_16 *)p,1,1);
+ vec_dststt((_16 *)p,1,1);
+ vec_dstt((_16 *)p,1,1);
+ vec_st(u32,1,( unsigned *)p);
+ vec_st(u32,1,(_16 *)p);
+ vec_ste(u32,1,( unsigned *)p);
+ vec_ste(u32,1,(_16 *)p);
+ vec_stl(u32,1,( unsigned *)p);
+ vec_stl(u32,1,(_16 *)p);
+ u8 = vec_lvsl(1,(const volatile signed short int *)p);
+ u8 = vec_lvsl(1,(_17 *)p);
+ u8 = vec_lvsr(1,(const volatile signed short int *)p);
+ u8 = vec_lvsr(1,(_17 *)p);
+ u8 = vec_lvsl(1,(const signed short int *)p);
+ u8 = vec_lvsl(1,(_18 *)p);
+ u8 = vec_lvsr(1,(const signed short int *)p);
+ u8 = vec_lvsr(1,(_18 *)p);
+ s16 = vec_ld(1,(const signed short int *)p);
+ s16 = vec_ld(1,(_18 *)p);
+ s16 = vec_lde(1,(const signed short int *)p);
+ s16 = vec_lde(1,(_18 *)p);
+ s16 = vec_ldl(1,(const signed short int *)p);
+ s16 = vec_ldl(1,(_18 *)p);
+ vec_dst((const signed short int *)p,1,1);
+ vec_dstst((const signed short int *)p,1,1);
+ vec_dststt((const signed short int *)p,1,1);
+ vec_dstt((const signed short int *)p,1,1);
+ vec_dst((_18 *)p,1,1);
+ vec_dstst((_18 *)p,1,1);
+ vec_dststt((_18 *)p,1,1);
+ vec_dstt((_18 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile signed short int *)p);
+ u8 = vec_lvsl(1,(_19 *)p);
+ u8 = vec_lvsr(1,( volatile signed short int *)p);
+ u8 = vec_lvsr(1,(_19 *)p);
+ u8 = vec_lvsl(1,( signed short int *)p);
+ u8 = vec_lvsl(1,(_20 *)p);
+ u8 = vec_lvsr(1,( signed short int *)p);
+ u8 = vec_lvsr(1,(_20 *)p);
+ s16 = vec_ld(1,( signed short int *)p);
+ s16 = vec_ld(1,(_20 *)p);
+ s16 = vec_lde(1,( signed short int *)p);
+ s16 = vec_lde(1,(_20 *)p);
+ s16 = vec_ldl(1,( signed short int *)p);
+ s16 = vec_ldl(1,(_20 *)p);
+ vec_dst(( signed short int *)p,1,1);
+ vec_dstst(( signed short int *)p,1,1);
+ vec_dststt(( signed short int *)p,1,1);
+ vec_dstt(( signed short int *)p,1,1);
+ vec_dst((_20 *)p,1,1);
+ vec_dstst((_20 *)p,1,1);
+ vec_dststt((_20 *)p,1,1);
+ vec_dstt((_20 *)p,1,1);
+ vec_st(s16,1,( signed short int *)p);
+ vec_st(s16,1,(_20 *)p);
+ vec_ste(s16,1,( signed short int *)p);
+ vec_ste(s16,1,(_20 *)p);
+ vec_stl(s16,1,( signed short int *)p);
+ vec_stl(s16,1,(_20 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned short int *)p);
+ u8 = vec_lvsl(1,(_21 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned short int *)p);
+ u8 = vec_lvsr(1,(_21 *)p);
+ u8 = vec_lvsl(1,(const unsigned short int *)p);
+ u8 = vec_lvsl(1,(_22 *)p);
+ u8 = vec_lvsr(1,(const unsigned short int *)p);
+ u8 = vec_lvsr(1,(_22 *)p);
+ u16 = vec_ld(1,(const unsigned short int *)p);
+ u16 = vec_ld(1,(_22 *)p);
+ u16 = vec_lde(1,(const unsigned short int *)p);
+ u16 = vec_lde(1,(_22 *)p);
+ u16 = vec_ldl(1,(const unsigned short int *)p);
+ u16 = vec_ldl(1,(_22 *)p);
+ vec_dst((const unsigned short int *)p,1,1);
+ vec_dstst((const unsigned short int *)p,1,1);
+ vec_dststt((const unsigned short int *)p,1,1);
+ vec_dstt((const unsigned short int *)p,1,1);
+ vec_dst((_22 *)p,1,1);
+ vec_dstst((_22 *)p,1,1);
+ vec_dststt((_22 *)p,1,1);
+ vec_dstt((_22 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned short int *)p);
+ u8 = vec_lvsl(1,(_23 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned short int *)p);
+ u8 = vec_lvsr(1,(_23 *)p);
+ u8 = vec_lvsl(1,( unsigned short int *)p);
+ u8 = vec_lvsl(1,(_24 *)p);
+ u8 = vec_lvsr(1,( unsigned short int *)p);
+ u8 = vec_lvsr(1,(_24 *)p);
+ u16 = vec_ld(1,( unsigned short int *)p);
+ u16 = vec_ld(1,(_24 *)p);
+ u16 = vec_lde(1,( unsigned short int *)p);
+ u16 = vec_lde(1,(_24 *)p);
+ u16 = vec_ldl(1,( unsigned short int *)p);
+ u16 = vec_ldl(1,(_24 *)p);
+ vec_dst(( unsigned short int *)p,1,1);
+ vec_dstst(( unsigned short int *)p,1,1);
+ vec_dststt(( unsigned short int *)p,1,1);
+ vec_dstt(( unsigned short int *)p,1,1);
+ vec_dst((_24 *)p,1,1);
+ vec_dstst((_24 *)p,1,1);
+ vec_dststt((_24 *)p,1,1);
+ vec_dstt((_24 *)p,1,1);
+ vec_st(u16,1,( unsigned short int *)p);
+ vec_st(u16,1,(_24 *)p);
+ vec_ste(u16,1,( unsigned short int *)p);
+ vec_ste(u16,1,(_24 *)p);
+ vec_stl(u16,1,( unsigned short int *)p);
+ vec_stl(u16,1,(_24 *)p);
+ p16 = vec_ld(1,(const vector pixel *)p);
+ p16 = vec_ld(1,(_26 *)p);
+ p16 = vec_ldl(1,(const vector pixel *)p);
+ p16 = vec_ldl(1,(_26 *)p);
+ vec_dst((const vector pixel *)p,1,1);
+ vec_dstst((const vector pixel *)p,1,1);
+ vec_dststt((const vector pixel *)p,1,1);
+ vec_dstt((const vector pixel *)p,1,1);
+ vec_dst((_26 *)p,1,1);
+ vec_dstst((_26 *)p,1,1);
+ vec_dststt((_26 *)p,1,1);
+ vec_dstt((_26 *)p,1,1);
+ p16 = vec_ld(1,( vector pixel *)p);
+ p16 = vec_ld(1,(_28 *)p);
+ p16 = vec_ldl(1,( vector pixel *)p);
+ p16 = vec_ldl(1,(_28 *)p);
+ vec_dst(( vector pixel *)p,1,1);
+ vec_dstst(( vector pixel *)p,1,1);
+ vec_dststt(( vector pixel *)p,1,1);
+ vec_dstt(( vector pixel *)p,1,1);
+ vec_dst((_28 *)p,1,1);
+ vec_dstst((_28 *)p,1,1);
+ vec_dststt((_28 *)p,1,1);
+ vec_dstt((_28 *)p,1,1);
+ vec_st(p16,1,( vector pixel *)p);
+ vec_st(p16,1,(_28 *)p);
+ vec_stl(p16,1,( vector pixel *)p);
+ vec_stl(p16,1,(_28 *)p);
+ b32 = vec_ld(1,(const vector bool int *)p);
+ b32 = vec_ld(1,(_30 *)p);
+ b32 = vec_ldl(1,(const vector bool int *)p);
+ b32 = vec_ldl(1,(_30 *)p);
+ vec_dst((const vector bool int *)p,1,1);
+ vec_dstst((const vector bool int *)p,1,1);
+ vec_dststt((const vector bool int *)p,1,1);
+ vec_dstt((const vector bool int *)p,1,1);
+ vec_dst((_30 *)p,1,1);
+ vec_dstst((_30 *)p,1,1);
+ vec_dststt((_30 *)p,1,1);
+ vec_dstt((_30 *)p,1,1);
+ b32 = vec_ld(1,( vector bool int *)p);
+ b32 = vec_ld(1,(_32 *)p);
+ b32 = vec_ldl(1,( vector bool int *)p);
+ b32 = vec_ldl(1,(_32 *)p);
+ vec_dst(( vector bool int *)p,1,1);
+ vec_dstst(( vector bool int *)p,1,1);
+ vec_dststt(( vector bool int *)p,1,1);
+ vec_dstt(( vector bool int *)p,1,1);
+ vec_dst((_32 *)p,1,1);
+ vec_dstst((_32 *)p,1,1);
+ vec_dststt((_32 *)p,1,1);
+ vec_dstt((_32 *)p,1,1);
+ vec_st(b32,1,( vector bool int *)p);
+ vec_st(b32,1,(_32 *)p);
+ vec_stl(b32,1,( vector bool int *)p);
+ vec_stl(b32,1,(_32 *)p);
+ s8 = vec_ld(1,(const vector signed char *)p);
+ s8 = vec_ld(1,(_34 *)p);
+ s8 = vec_ldl(1,(const vector signed char *)p);
+ s8 = vec_ldl(1,(_34 *)p);
+ vec_dst((const vector signed char *)p,1,1);
+ vec_dstst((const vector signed char *)p,1,1);
+ vec_dststt((const vector signed char *)p,1,1);
+ vec_dstt((const vector signed char *)p,1,1);
+ vec_dst((_34 *)p,1,1);
+ vec_dstst((_34 *)p,1,1);
+ vec_dststt((_34 *)p,1,1);
+ vec_dstt((_34 *)p,1,1);
+ s8 = vec_ld(1,( vector signed char *)p);
+ s8 = vec_ld(1,(_36 *)p);
+ s8 = vec_ldl(1,( vector signed char *)p);
+ s8 = vec_ldl(1,(_36 *)p);
+ vec_dst(( vector signed char *)p,1,1);
+ vec_dstst(( vector signed char *)p,1,1);
+ vec_dststt(( vector signed char *)p,1,1);
+ vec_dstt(( vector signed char *)p,1,1);
+ vec_dst((_36 *)p,1,1);
+ vec_dstst((_36 *)p,1,1);
+ vec_dststt((_36 *)p,1,1);
+ vec_dstt((_36 *)p,1,1);
+ vec_st(s8,1,( vector signed char *)p);
+ vec_st(s8,1,(_36 *)p);
+ vec_stl(s8,1,( vector signed char *)p);
+ vec_stl(s8,1,(_36 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned *)p);
+ u8 = vec_lvsl(1,(_37 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned *)p);
+ u8 = vec_lvsr(1,(_37 *)p);
+ u8 = vec_lvsl(1,(const unsigned *)p);
+ u8 = vec_lvsl(1,(_38 *)p);
+ u8 = vec_lvsr(1,(const unsigned *)p);
+ u8 = vec_lvsr(1,(_38 *)p);
+ u32 = vec_ld(1,(const unsigned *)p);
+ u32 = vec_ld(1,(_38 *)p);
+ u32 = vec_lde(1,(const unsigned *)p);
+ u32 = vec_lde(1,(_38 *)p);
+ u32 = vec_ldl(1,(const unsigned *)p);
+ u32 = vec_ldl(1,(_38 *)p);
+ vec_dst((const unsigned *)p,1,1);
+ vec_dstst((const unsigned *)p,1,1);
+ vec_dststt((const unsigned *)p,1,1);
+ vec_dstt((const unsigned *)p,1,1);
+ vec_dst((_38 *)p,1,1);
+ vec_dstst((_38 *)p,1,1);
+ vec_dststt((_38 *)p,1,1);
+ vec_dstt((_38 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned *)p);
+ u8 = vec_lvsl(1,(_39 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned *)p);
+ u8 = vec_lvsr(1,(_39 *)p);
+ u8 = vec_lvsl(1,( unsigned *)p);
+ u8 = vec_lvsl(1,(_40 *)p);
+ u8 = vec_lvsr(1,( unsigned *)p);
+ u8 = vec_lvsr(1,(_40 *)p);
+ u32 = vec_ld(1,( unsigned *)p);
+ u32 = vec_ld(1,(_40 *)p);
+ u32 = vec_lde(1,( unsigned *)p);
+ u32 = vec_lde(1,(_40 *)p);
+ u32 = vec_ldl(1,( unsigned *)p);
+ u32 = vec_ldl(1,(_40 *)p);
+ vec_dst(( unsigned *)p,1,1);
+ vec_dstst(( unsigned *)p,1,1);
+ vec_dststt(( unsigned *)p,1,1);
+ vec_dstt(( unsigned *)p,1,1);
+ vec_dst((_40 *)p,1,1);
+ vec_dstst((_40 *)p,1,1);
+ vec_dststt((_40 *)p,1,1);
+ vec_dstt((_40 *)p,1,1);
+ vec_st(u32,1,( unsigned *)p);
+ vec_st(u32,1,(_40 *)p);
+ vec_ste(u32,1,( unsigned *)p);
+ vec_ste(u32,1,(_40 *)p);
+ vec_stl(u32,1,( unsigned *)p);
+ vec_stl(u32,1,(_40 *)p);
+ u8 = vec_lvsl(1,(const volatile signed int *)p);
+ u8 = vec_lvsl(1,(_41 *)p);
+ u8 = vec_lvsr(1,(const volatile signed int *)p);
+ u8 = vec_lvsr(1,(_41 *)p);
+ u8 = vec_lvsl(1,(const signed int *)p);
+ u8 = vec_lvsl(1,(_42 *)p);
+ u8 = vec_lvsr(1,(const signed int *)p);
+ u8 = vec_lvsr(1,(_42 *)p);
+ s32 = vec_ld(1,(const signed int *)p);
+ s32 = vec_ld(1,(_42 *)p);
+ s32 = vec_lde(1,(const signed int *)p);
+ s32 = vec_lde(1,(_42 *)p);
+ s32 = vec_ldl(1,(const signed int *)p);
+ s32 = vec_ldl(1,(_42 *)p);
+ vec_dst((const signed int *)p,1,1);
+ vec_dstst((const signed int *)p,1,1);
+ vec_dststt((const signed int *)p,1,1);
+ vec_dstt((const signed int *)p,1,1);
+ vec_dst((_42 *)p,1,1);
+ vec_dstst((_42 *)p,1,1);
+ vec_dststt((_42 *)p,1,1);
+ vec_dstt((_42 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile signed int *)p);
+ u8 = vec_lvsl(1,(_43 *)p);
+ u8 = vec_lvsr(1,( volatile signed int *)p);
+ u8 = vec_lvsr(1,(_43 *)p);
+ u8 = vec_lvsl(1,( signed int *)p);
+ u8 = vec_lvsl(1,(_44 *)p);
+ u8 = vec_lvsr(1,( signed int *)p);
+ u8 = vec_lvsr(1,(_44 *)p);
+ s32 = vec_ld(1,( signed int *)p);
+ s32 = vec_ld(1,(_44 *)p);
+ s32 = vec_lde(1,( signed int *)p);
+ s32 = vec_lde(1,(_44 *)p);
+ s32 = vec_ldl(1,( signed int *)p);
+ s32 = vec_ldl(1,(_44 *)p);
+ vec_dst(( signed int *)p,1,1);
+ vec_dstst(( signed int *)p,1,1);
+ vec_dststt(( signed int *)p,1,1);
+ vec_dstt(( signed int *)p,1,1);
+ vec_dst((_44 *)p,1,1);
+ vec_dstst((_44 *)p,1,1);
+ vec_dststt((_44 *)p,1,1);
+ vec_dstt((_44 *)p,1,1);
+ vec_st(s32,1,( signed int *)p);
+ vec_st(s32,1,(_44 *)p);
+ vec_ste(s32,1,( signed int *)p);
+ vec_ste(s32,1,(_44 *)p);
+ vec_stl(s32,1,( signed int *)p);
+ vec_stl(s32,1,(_44 *)p);
+ f32 = vec_ld(1,(const vector float *)p);
+ f32 = vec_ld(1,(_46 *)p);
+ f32 = vec_ldl(1,(const vector float *)p);
+ f32 = vec_ldl(1,(_46 *)p);
+ vec_dst((const vector float *)p,1,1);
+ vec_dstst((const vector float *)p,1,1);
+ vec_dststt((const vector float *)p,1,1);
+ vec_dstt((const vector float *)p,1,1);
+ vec_dst((_46 *)p,1,1);
+ vec_dstst((_46 *)p,1,1);
+ vec_dststt((_46 *)p,1,1);
+ vec_dstt((_46 *)p,1,1);
+ f32 = vec_ld(1,( vector float *)p);
+ f32 = vec_ld(1,(_48 *)p);
+ f32 = vec_ldl(1,( vector float *)p);
+ f32 = vec_ldl(1,(_48 *)p);
+ vec_dst(( vector float *)p,1,1);
+ vec_dstst(( vector float *)p,1,1);
+ vec_dststt(( vector float *)p,1,1);
+ vec_dstt(( vector float *)p,1,1);
+ vec_dst((_48 *)p,1,1);
+ vec_dstst((_48 *)p,1,1);
+ vec_dststt((_48 *)p,1,1);
+ vec_dstt((_48 *)p,1,1);
+ vec_st(f32,1,( vector float *)p);
+ vec_st(f32,1,(_48 *)p);
+ vec_stl(f32,1,( vector float *)p);
+ vec_stl(f32,1,(_48 *)p);
+ s16 = vec_ld(1,(const vector signed short *)p);
+ s16 = vec_ld(1,(_50 *)p);
+ s16 = vec_ldl(1,(const vector signed short *)p);
+ s16 = vec_ldl(1,(_50 *)p);
+ vec_dst((const vector signed short *)p,1,1);
+ vec_dstst((const vector signed short *)p,1,1);
+ vec_dststt((const vector signed short *)p,1,1);
+ vec_dstt((const vector signed short *)p,1,1);
+ vec_dst((_50 *)p,1,1);
+ vec_dstst((_50 *)p,1,1);
+ vec_dststt((_50 *)p,1,1);
+ vec_dstt((_50 *)p,1,1);
+ s16 = vec_ld(1,( vector signed short *)p);
+ s16 = vec_ld(1,(_52 *)p);
+ s16 = vec_ldl(1,( vector signed short *)p);
+ s16 = vec_ldl(1,(_52 *)p);
+ vec_dst(( vector signed short *)p,1,1);
+ vec_dstst(( vector signed short *)p,1,1);
+ vec_dststt(( vector signed short *)p,1,1);
+ vec_dstt(( vector signed short *)p,1,1);
+ vec_dst((_52 *)p,1,1);
+ vec_dstst((_52 *)p,1,1);
+ vec_dststt((_52 *)p,1,1);
+ vec_dstt((_52 *)p,1,1);
+ vec_st(s16,1,( vector signed short *)p);
+ vec_st(s16,1,(_52 *)p);
+ vec_stl(s16,1,( vector signed short *)p);
+ vec_stl(s16,1,(_52 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned char *)p);
+ u8 = vec_lvsl(1,(_53 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned char *)p);
+ u8 = vec_lvsr(1,(_53 *)p);
+ u8 = vec_lvsl(1,(const unsigned char *)p);
+ u8 = vec_lvsl(1,(_54 *)p);
+ u8 = vec_lvsr(1,(const unsigned char *)p);
+ u8 = vec_lvsr(1,(_54 *)p);
+ u8 = vec_ld(1,(const unsigned char *)p);
+ u8 = vec_ld(1,(_54 *)p);
+ u8 = vec_lde(1,(const unsigned char *)p);
+ u8 = vec_lde(1,(_54 *)p);
+ u8 = vec_ldl(1,(const unsigned char *)p);
+ u8 = vec_ldl(1,(_54 *)p);
+ vec_dst((const unsigned char *)p,1,1);
+ vec_dstst((const unsigned char *)p,1,1);
+ vec_dststt((const unsigned char *)p,1,1);
+ vec_dstt((const unsigned char *)p,1,1);
+ vec_dst((_54 *)p,1,1);
+ vec_dstst((_54 *)p,1,1);
+ vec_dststt((_54 *)p,1,1);
+ vec_dstt((_54 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned char *)p);
+ u8 = vec_lvsl(1,(_55 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned char *)p);
+ u8 = vec_lvsr(1,(_55 *)p);
+ u8 = vec_lvsl(1,( unsigned char *)p);
+ u8 = vec_lvsl(1,(_56 *)p);
+ u8 = vec_lvsr(1,( unsigned char *)p);
+ u8 = vec_lvsr(1,(_56 *)p);
+ u8 = vec_ld(1,( unsigned char *)p);
+ u8 = vec_ld(1,(_56 *)p);
+ u8 = vec_lde(1,( unsigned char *)p);
+ u8 = vec_lde(1,(_56 *)p);
+ u8 = vec_ldl(1,( unsigned char *)p);
+ u8 = vec_ldl(1,(_56 *)p);
+ vec_dst(( unsigned char *)p,1,1);
+ vec_dstst(( unsigned char *)p,1,1);
+ vec_dststt(( unsigned char *)p,1,1);
+ vec_dstt(( unsigned char *)p,1,1);
+ vec_dst((_56 *)p,1,1);
+ vec_dstst((_56 *)p,1,1);
+ vec_dststt((_56 *)p,1,1);
+ vec_dstt((_56 *)p,1,1);
+ vec_st(u8,1,( unsigned char *)p);
+ vec_st(u8,1,(_56 *)p);
+ vec_ste(u8,1,( unsigned char *)p);
+ vec_ste(u8,1,(_56 *)p);
+ vec_stl(u8,1,( unsigned char *)p);
+ vec_stl(u8,1,(_56 *)p);
+ u8 = vec_lvsl(1,(const volatile signed int *)p);
+ u8 = vec_lvsl(1,(_57 *)p);
+ u8 = vec_lvsr(1,(const volatile signed int *)p);
+ u8 = vec_lvsr(1,(_57 *)p);
+ u8 = vec_lvsl(1,(const signed int *)p);
+ u8 = vec_lvsl(1,(_58 *)p);
+ u8 = vec_lvsr(1,(const signed int *)p);
+ u8 = vec_lvsr(1,(_58 *)p);
+ s32 = vec_ld(1,(const signed int *)p);
+ s32 = vec_ld(1,(_58 *)p);
+ s32 = vec_lde(1,(const signed int *)p);
+ s32 = vec_lde(1,(_58 *)p);
+ s32 = vec_ldl(1,(const signed int *)p);
+ s32 = vec_ldl(1,(_58 *)p);
+ vec_dst((const signed int *)p,1,1);
+ vec_dstst((const signed int *)p,1,1);
+ vec_dststt((const signed int *)p,1,1);
+ vec_dstt((const signed int *)p,1,1);
+ vec_dst((_58 *)p,1,1);
+ vec_dstst((_58 *)p,1,1);
+ vec_dststt((_58 *)p,1,1);
+ vec_dstt((_58 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile signed int *)p);
+ u8 = vec_lvsl(1,(_59 *)p);
+ u8 = vec_lvsr(1,( volatile signed int *)p);
+ u8 = vec_lvsr(1,(_59 *)p);
+ u8 = vec_lvsl(1,( signed int *)p);
+ u8 = vec_lvsl(1,(_60 *)p);
+ u8 = vec_lvsr(1,( signed int *)p);
+ u8 = vec_lvsr(1,(_60 *)p);
+ s32 = vec_ld(1,( signed int *)p);
+ s32 = vec_ld(1,(_60 *)p);
+ s32 = vec_lde(1,( signed int *)p);
+ s32 = vec_lde(1,(_60 *)p);
+ s32 = vec_ldl(1,( signed int *)p);
+ s32 = vec_ldl(1,(_60 *)p);
+ vec_dst(( signed int *)p,1,1);
+ vec_dstst(( signed int *)p,1,1);
+ vec_dststt(( signed int *)p,1,1);
+ vec_dstt(( signed int *)p,1,1);
+ vec_dst((_60 *)p,1,1);
+ vec_dstst((_60 *)p,1,1);
+ vec_dststt((_60 *)p,1,1);
+ vec_dstt((_60 *)p,1,1);
+ vec_st(s32,1,( signed int *)p);
+ vec_st(s32,1,(_60 *)p);
+ vec_ste(s32,1,( signed int *)p);
+ vec_ste(s32,1,(_60 *)p);
+ vec_stl(s32,1,( signed int *)p);
+ vec_stl(s32,1,(_60 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned int *)p);
+ u8 = vec_lvsl(1,(_61 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned int *)p);
+ u8 = vec_lvsr(1,(_61 *)p);
+ u8 = vec_lvsl(1,(const unsigned int *)p);
+ u8 = vec_lvsl(1,(_62 *)p);
+ u8 = vec_lvsr(1,(const unsigned int *)p);
+ u8 = vec_lvsr(1,(_62 *)p);
+ u32 = vec_ld(1,(const unsigned int *)p);
+ u32 = vec_ld(1,(_62 *)p);
+ u32 = vec_lde(1,(const unsigned int *)p);
+ u32 = vec_lde(1,(_62 *)p);
+ u32 = vec_ldl(1,(const unsigned int *)p);
+ u32 = vec_ldl(1,(_62 *)p);
+ vec_dst((const unsigned int *)p,1,1);
+ vec_dstst((const unsigned int *)p,1,1);
+ vec_dststt((const unsigned int *)p,1,1);
+ vec_dstt((const unsigned int *)p,1,1);
+ vec_dst((_62 *)p,1,1);
+ vec_dstst((_62 *)p,1,1);
+ vec_dststt((_62 *)p,1,1);
+ vec_dstt((_62 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned int *)p);
+ u8 = vec_lvsl(1,(_63 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned int *)p);
+ u8 = vec_lvsr(1,(_63 *)p);
+ u8 = vec_lvsl(1,( unsigned int *)p);
+ u8 = vec_lvsl(1,(_64 *)p);
+ u8 = vec_lvsr(1,( unsigned int *)p);
+ u8 = vec_lvsr(1,(_64 *)p);
+ u32 = vec_ld(1,( unsigned int *)p);
+ u32 = vec_ld(1,(_64 *)p);
+ u32 = vec_lde(1,( unsigned int *)p);
+ u32 = vec_lde(1,(_64 *)p);
+ u32 = vec_ldl(1,( unsigned int *)p);
+ u32 = vec_ldl(1,(_64 *)p);
+ vec_dst(( unsigned int *)p,1,1);
+ vec_dstst(( unsigned int *)p,1,1);
+ vec_dststt(( unsigned int *)p,1,1);
+ vec_dstt(( unsigned int *)p,1,1);
+ vec_dst((_64 *)p,1,1);
+ vec_dstst((_64 *)p,1,1);
+ vec_dststt((_64 *)p,1,1);
+ vec_dstt((_64 *)p,1,1);
+ vec_st(u32,1,( unsigned int *)p);
+ vec_st(u32,1,(_64 *)p);
+ vec_ste(u32,1,( unsigned int *)p);
+ vec_ste(u32,1,(_64 *)p);
+ vec_stl(u32,1,( unsigned int *)p);
+ vec_stl(u32,1,(_64 *)p);
+ u8 = vec_lvsl(1,(const volatile unsigned short *)p);
+ u8 = vec_lvsl(1,(_65 *)p);
+ u8 = vec_lvsr(1,(const volatile unsigned short *)p);
+ u8 = vec_lvsr(1,(_65 *)p);
+ u8 = vec_lvsl(1,(const unsigned short *)p);
+ u8 = vec_lvsl(1,(_66 *)p);
+ u8 = vec_lvsr(1,(const unsigned short *)p);
+ u8 = vec_lvsr(1,(_66 *)p);
+ u16 = vec_ld(1,(const unsigned short *)p);
+ u16 = vec_ld(1,(_66 *)p);
+ u16 = vec_lde(1,(const unsigned short *)p);
+ u16 = vec_lde(1,(_66 *)p);
+ u16 = vec_ldl(1,(const unsigned short *)p);
+ u16 = vec_ldl(1,(_66 *)p);
+ vec_dst((const unsigned short *)p,1,1);
+ vec_dstst((const unsigned short *)p,1,1);
+ vec_dststt((const unsigned short *)p,1,1);
+ vec_dstt((const unsigned short *)p,1,1);
+ vec_dst((_66 *)p,1,1);
+ vec_dstst((_66 *)p,1,1);
+ vec_dststt((_66 *)p,1,1);
+ vec_dstt((_66 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile unsigned short *)p);
+ u8 = vec_lvsl(1,(_67 *)p);
+ u8 = vec_lvsr(1,( volatile unsigned short *)p);
+ u8 = vec_lvsr(1,(_67 *)p);
+ u8 = vec_lvsl(1,( unsigned short *)p);
+ u8 = vec_lvsl(1,(_68 *)p);
+ u8 = vec_lvsr(1,( unsigned short *)p);
+ u8 = vec_lvsr(1,(_68 *)p);
+ u16 = vec_ld(1,( unsigned short *)p);
+ u16 = vec_ld(1,(_68 *)p);
+ u16 = vec_lde(1,( unsigned short *)p);
+ u16 = vec_lde(1,(_68 *)p);
+ u16 = vec_ldl(1,( unsigned short *)p);
+ u16 = vec_ldl(1,(_68 *)p);
+ vec_dst(( unsigned short *)p,1,1);
+ vec_dstst(( unsigned short *)p,1,1);
+ vec_dststt(( unsigned short *)p,1,1);
+ vec_dstt(( unsigned short *)p,1,1);
+ vec_dst((_68 *)p,1,1);
+ vec_dstst((_68 *)p,1,1);
+ vec_dststt((_68 *)p,1,1);
+ vec_dstt((_68 *)p,1,1);
+ vec_st(u16,1,( unsigned short *)p);
+ vec_st(u16,1,(_68 *)p);
+ vec_ste(u16,1,( unsigned short *)p);
+ vec_ste(u16,1,(_68 *)p);
+ vec_stl(u16,1,( unsigned short *)p);
+ vec_stl(u16,1,(_68 *)p);
+ u8 = vec_lvsl(1,(const volatile short *)p);
+ u8 = vec_lvsl(1,(_69 *)p);
+ u8 = vec_lvsr(1,(const volatile short *)p);
+ u8 = vec_lvsr(1,(_69 *)p);
+ u8 = vec_lvsl(1,(const short *)p);
+ u8 = vec_lvsl(1,(_70 *)p);
+ u8 = vec_lvsr(1,(const short *)p);
+ u8 = vec_lvsr(1,(_70 *)p);
+ s16 = vec_ld(1,(const short *)p);
+ s16 = vec_ld(1,(_70 *)p);
+ s16 = vec_lde(1,(const short *)p);
+ s16 = vec_lde(1,(_70 *)p);
+ s16 = vec_ldl(1,(const short *)p);
+ s16 = vec_ldl(1,(_70 *)p);
+ vec_dst((const short *)p,1,1);
+ vec_dstst((const short *)p,1,1);
+ vec_dststt((const short *)p,1,1);
+ vec_dstt((const short *)p,1,1);
+ vec_dst((_70 *)p,1,1);
+ vec_dstst((_70 *)p,1,1);
+ vec_dststt((_70 *)p,1,1);
+ vec_dstt((_70 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile short *)p);
+ u8 = vec_lvsl(1,(_71 *)p);
+ u8 = vec_lvsr(1,( volatile short *)p);
+ u8 = vec_lvsr(1,(_71 *)p);
+ u8 = vec_lvsl(1,( short *)p);
+ u8 = vec_lvsl(1,(_72 *)p);
+ u8 = vec_lvsr(1,( short *)p);
+ u8 = vec_lvsr(1,(_72 *)p);
+ s16 = vec_ld(1,( short *)p);
+ s16 = vec_ld(1,(_72 *)p);
+ s16 = vec_lde(1,( short *)p);
+ s16 = vec_lde(1,(_72 *)p);
+ s16 = vec_ldl(1,( short *)p);
+ s16 = vec_ldl(1,(_72 *)p);
+ vec_dst(( short *)p,1,1);
+ vec_dstst(( short *)p,1,1);
+ vec_dststt(( short *)p,1,1);
+ vec_dstt(( short *)p,1,1);
+ vec_dst((_72 *)p,1,1);
+ vec_dstst((_72 *)p,1,1);
+ vec_dststt((_72 *)p,1,1);
+ vec_dstt((_72 *)p,1,1);
+ vec_st(s16,1,( short *)p);
+ vec_st(s16,1,(_72 *)p);
+ vec_ste(s16,1,( short *)p);
+ vec_ste(s16,1,(_72 *)p);
+ vec_stl(s16,1,( short *)p);
+ vec_stl(s16,1,(_72 *)p);
+ u8 = vec_lvsl(1,(const int volatile *)p);
+ u8 = vec_lvsl(1,(_73 *)p);
+ u8 = vec_lvsr(1,(const int volatile *)p);
+ u8 = vec_lvsr(1,(_73 *)p);
+ u8 = vec_lvsl(1,(const int *)p);
+ u8 = vec_lvsl(1,(_74 *)p);
+ u8 = vec_lvsr(1,(const int *)p);
+ u8 = vec_lvsr(1,(_74 *)p);
+ s32 = vec_ld(1,(const int *)p);
+ s32 = vec_ld(1,(_74 *)p);
+ s32 = vec_lde(1,(const int *)p);
+ s32 = vec_lde(1,(_74 *)p);
+ s32 = vec_ldl(1,(const int *)p);
+ s32 = vec_ldl(1,(_74 *)p);
+ vec_dst((const int *)p,1,1);
+ vec_dstst((const int *)p,1,1);
+ vec_dststt((const int *)p,1,1);
+ vec_dstt((const int *)p,1,1);
+ vec_dst((_74 *)p,1,1);
+ vec_dstst((_74 *)p,1,1);
+ vec_dststt((_74 *)p,1,1);
+ vec_dstt((_74 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile int *)p);
+ u8 = vec_lvsl(1,(_75 *)p);
+ u8 = vec_lvsr(1,( volatile int *)p);
+ u8 = vec_lvsr(1,(_75 *)p);
+ u8 = vec_lvsl(1,( int *)p);
+ u8 = vec_lvsl(1,(_76 *)p);
+ u8 = vec_lvsr(1,( int *)p);
+ u8 = vec_lvsr(1,(_76 *)p);
+ s32 = vec_ld(1,( int *)p);
+ s32 = vec_ld(1,(_76 *)p);
+ s32 = vec_lde(1,(int *)p);
+ s32 = vec_lde(1,(_76 *)p);
+ s32 = vec_ldl(1,(int *)p);
+ s32 = vec_ldl(1,(_76 *)p);
+ vec_dst((int *)p,1,1);
+ vec_dstst((int *)p,1,1);
+ vec_dststt((int *)p,1,1);
+ vec_dstt((int *)p,1,1);
+ vec_dst((_76 *)p,1,1);
+ vec_dstst((_76 *)p,1,1);
+ vec_dststt((_76 *)p,1,1);
+ vec_dstt((_76 *)p,1,1);
+ vec_st(s32,1,(int *)p);
+ vec_st(s32,1,(_76 *)p);
+ vec_ste(s32,1,(int *)p);
+ vec_ste(s32,1,(_76 *)p);
+ vec_stl(s32,1,(int *)p);
+ vec_stl(s32,1,(_76 *)p);
+ u16 = vec_ld(1,(const vector unsigned short *)p);
+ u16 = vec_ld(1,(_78 *)p);
+ u16 = vec_ldl(1,(const vector unsigned short *)p);
+ u16 = vec_ldl(1,(_78 *)p);
+ vec_dst((const vector unsigned short *)p,1,1);
+ vec_dstst((const vector unsigned short *)p,1,1);
+ vec_dststt((const vector unsigned short *)p,1,1);
+ vec_dstt((const vector unsigned short *)p,1,1);
+ vec_dst((_78 *)p,1,1);
+ vec_dstst((_78 *)p,1,1);
+ vec_dststt((_78 *)p,1,1);
+ vec_dstt((_78 *)p,1,1);
+ u16 = vec_ld(1,( vector unsigned short *)p);
+ u16 = vec_ld(1,(_80 *)p);
+ u16 = vec_ldl(1,( vector unsigned short *)p);
+ u16 = vec_ldl(1,(_80 *)p);
+ vec_dst(( vector unsigned short *)p,1,1);
+ vec_dstst(( vector unsigned short *)p,1,1);
+ vec_dststt(( vector unsigned short *)p,1,1);
+ vec_dstt(( vector unsigned short *)p,1,1);
+ vec_dst((_80 *)p,1,1);
+ vec_dstst((_80 *)p,1,1);
+ vec_dststt((_80 *)p,1,1);
+ vec_dstt((_80 *)p,1,1);
+ vec_st(u16,1,( vector unsigned short *)p);
+ vec_st(u16,1,(_80 *)p);
+ vec_stl(u16,1,( vector unsigned short *)p);
+ vec_stl(u16,1,(_80 *)p);
+ b8 = vec_ld(1,(const vector bool char *)p);
+ b8 = vec_ld(1,(_82 *)p);
+ b8 = vec_ldl(1,(const vector bool char *)p);
+ b8 = vec_ldl(1,(_82 *)p);
+ vec_dst((const vector bool char *)p,1,1);
+ vec_dstst((const vector bool char *)p,1,1);
+ vec_dststt((const vector bool char *)p,1,1);
+ vec_dstt((const vector bool char *)p,1,1);
+ vec_dst((_82 *)p,1,1);
+ vec_dstst((_82 *)p,1,1);
+ vec_dststt((_82 *)p,1,1);
+ vec_dstt((_82 *)p,1,1);
+ b8 = vec_ld(1,( vector bool char *)p);
+ b8 = vec_ld(1,(_84 *)p);
+ b8 = vec_ldl(1,( vector bool char *)p);
+ b8 = vec_ldl(1,(_84 *)p);
+ vec_dst(( vector bool char *)p,1,1);
+ vec_dstst(( vector bool char *)p,1,1);
+ vec_dststt(( vector bool char *)p,1,1);
+ vec_dstt(( vector bool char *)p,1,1);
+ vec_dst((_84 *)p,1,1);
+ vec_dstst((_84 *)p,1,1);
+ vec_dststt((_84 *)p,1,1);
+ vec_dstt((_84 *)p,1,1);
+ vec_st(b8,1,( vector bool char *)p);
+ vec_st(b8,1,(_84 *)p);
+ vec_stl(b8,1,( vector bool char *)p);
+ vec_stl(b8,1,(_84 *)p);
+ u8 = vec_lvsl(1,(const volatile int signed *)p);
+ u8 = vec_lvsl(1,(_85 *)p);
+ u8 = vec_lvsr(1,(const volatile int signed *)p);
+ u8 = vec_lvsr(1,(_85 *)p);
+ u8 = vec_lvsl(1,(const int signed *)p);
+ u8 = vec_lvsl(1,(_86 *)p);
+ u8 = vec_lvsr(1,(const int signed *)p);
+ u8 = vec_lvsr(1,(_86 *)p);
+ s32 = vec_ld(1,(const int signed *)p);
+ s32 = vec_ld(1,(_86 *)p);
+ s32 = vec_lde(1,(const int signed *)p);
+ s32 = vec_lde(1,(_86 *)p);
+ s32 = vec_ldl(1,(const int signed *)p);
+ s32 = vec_ldl(1,(_86 *)p);
+ vec_dst((const int signed *)p,1,1);
+ vec_dstst((const int signed *)p,1,1);
+ vec_dststt((const int signed *)p,1,1);
+ vec_dstt((const int signed *)p,1,1);
+ vec_dst((_86 *)p,1,1);
+ vec_dstst((_86 *)p,1,1);
+ vec_dststt((_86 *)p,1,1);
+ vec_dstt((_86 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile int signed *)p);
+ u8 = vec_lvsl(1,(_87 *)p);
+ u8 = vec_lvsr(1,( volatile int signed *)p);
+ u8 = vec_lvsr(1,(_87 *)p);
+ u8 = vec_lvsl(1,(int signed *)p);
+ u8 = vec_lvsl(1,(_88 *)p);
+ u8 = vec_lvsr(1,(int signed *)p);
+ u8 = vec_lvsr(1,(_88 *)p);
+ s32 = vec_ld(1,(int signed *)p);
+ s32 = vec_ld(1,(_88 *)p);
+ s32 = vec_lde(1,(int signed *)p);
+ s32 = vec_lde(1,(_88 *)p);
+ s32 = vec_ldl(1,(int signed *)p);
+ s32 = vec_ldl(1,(_88 *)p);
+ vec_dst((int signed *)p,1,1);
+ vec_dstst((int signed *)p,1,1);
+ vec_dststt((int signed *)p,1,1);
+ vec_dstt((int signed *)p,1,1);
+ vec_dst((_88 *)p,1,1);
+ vec_dstst((_88 *)p,1,1);
+ vec_dststt((_88 *)p,1,1);
+ vec_dstt((_88 *)p,1,1);
+ vec_st(s32,1,(int signed *)p);
+ vec_st(s32,1,(_88 *)p);
+ vec_ste(s32,1,(int signed *)p);
+ vec_ste(s32,1,(_88 *)p);
+ vec_stl(s32,1,(int signed *)p);
+ vec_stl(s32,1,(_88 *)p);
+ s32 = vec_ld(1,(const vector signed int *)p);
+ s32 = vec_ld(1,(_90 *)p);
+ s32 = vec_ldl(1,(const vector signed int *)p);
+ s32 = vec_ldl(1,(_90 *)p);
+ vec_dst((const vector signed int *)p,1,1);
+ vec_dstst((const vector signed int *)p,1,1);
+ vec_dststt((const vector signed int *)p,1,1);
+ vec_dstt((const vector signed int *)p,1,1);
+ vec_dst((_90 *)p,1,1);
+ vec_dstst((_90 *)p,1,1);
+ vec_dststt((_90 *)p,1,1);
+ vec_dstt((_90 *)p,1,1);
+ s32 = vec_ld(1,( vector signed int *)p);
+ s32 = vec_ld(1,(_92 *)p);
+ s32 = vec_ldl(1,( vector signed int *)p);
+ s32 = vec_ldl(1,(_92 *)p);
+ vec_dst(( vector signed int *)p,1,1);
+ vec_dstst(( vector signed int *)p,1,1);
+ vec_dststt(( vector signed int *)p,1,1);
+ vec_dstt(( vector signed int *)p,1,1);
+ vec_dst((_92 *)p,1,1);
+ vec_dstst((_92 *)p,1,1);
+ vec_dststt((_92 *)p,1,1);
+ vec_dstt((_92 *)p,1,1);
+ vec_st(s32,1,( vector signed int *)p);
+ vec_st(s32,1,(_92 *)p);
+ vec_stl(s32,1,( vector signed int *)p);
+ vec_stl(s32,1,(_92 *)p);
+ u32 = vec_ld(1,(const vector unsigned int *)p);
+ u32 = vec_ld(1,(_94 *)p);
+ u32 = vec_ldl(1,(const vector unsigned int *)p);
+ u32 = vec_ldl(1,(_94 *)p);
+ vec_dst((const vector unsigned int *)p,1,1);
+ vec_dstst((const vector unsigned int *)p,1,1);
+ vec_dststt((const vector unsigned int *)p,1,1);
+ vec_dstt((const vector unsigned int *)p,1,1);
+ vec_dst((_94 *)p,1,1);
+ vec_dstst((_94 *)p,1,1);
+ vec_dststt((_94 *)p,1,1);
+ vec_dstt((_94 *)p,1,1);
+ u32 = vec_ld(1,( vector unsigned int *)p);
+ u32 = vec_ld(1,(_96 *)p);
+ u32 = vec_ldl(1,( vector unsigned int *)p);
+ u32 = vec_ldl(1,(_96 *)p);
+ vec_dst(( vector unsigned int *)p,1,1);
+ vec_dstst(( vector unsigned int *)p,1,1);
+ vec_dststt(( vector unsigned int *)p,1,1);
+ vec_dstt(( vector unsigned int *)p,1,1);
+ vec_dst((_96 *)p,1,1);
+ vec_dstst((_96 *)p,1,1);
+ vec_dststt((_96 *)p,1,1);
+ vec_dstt((_96 *)p,1,1);
+ vec_st(u32,1,( vector unsigned int *)p);
+ vec_st(u32,1,(_96 *)p);
+ vec_stl(u32,1,( vector unsigned int *)p);
+ vec_stl(u32,1,(_96 *)p);
+ u8 = vec_lvsl(1,(const volatile int signed *)p);
+ u8 = vec_lvsl(1,(_97 *)p);
+ u8 = vec_lvsr(1,(const volatile int signed *)p);
+ u8 = vec_lvsr(1,(_97 *)p);
+ u8 = vec_lvsl(1,(const int signed *)p);
+ u8 = vec_lvsl(1,(_98 *)p);
+ u8 = vec_lvsr(1,(const int signed *)p);
+ u8 = vec_lvsr(1,(_98 *)p);
+ s32 = vec_ld(1,(const int signed *)p);
+ s32 = vec_ld(1,(_98 *)p);
+ s32 = vec_lde(1,(const int signed *)p);
+ s32 = vec_lde(1,(_98 *)p);
+ s32 = vec_ldl(1,(const int signed *)p);
+ s32 = vec_ldl(1,(_98 *)p);
+ vec_dst((const int signed *)p,1,1);
+ vec_dstst((const int signed *)p,1,1);
+ vec_dststt((const int signed *)p,1,1);
+ vec_dstt((const int signed *)p,1,1);
+ vec_dst((_98 *)p,1,1);
+ vec_dstst((_98 *)p,1,1);
+ vec_dststt((_98 *)p,1,1);
+ vec_dstt((_98 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile int signed *)p);
+ u8 = vec_lvsl(1,(_99 *)p);
+ u8 = vec_lvsr(1,( volatile int signed *)p);
+ u8 = vec_lvsr(1,(_99 *)p);
+ u8 = vec_lvsl(1,(int signed *)p);
+ u8 = vec_lvsl(1,(_100 *)p);
+ u8 = vec_lvsr(1,(int signed *)p);
+ u8 = vec_lvsr(1,(_100 *)p);
+ s32 = vec_ld(1,(int signed *)p);
+ s32 = vec_ld(1,(_100 *)p);
+ s32 = vec_lde(1,(int signed *)p);
+ s32 = vec_lde(1,(_100 *)p);
+ s32 = vec_ldl(1,(int signed *)p);
+ s32 = vec_ldl(1,(_100 *)p);
+ vec_dst((int signed *)p,1,1);
+ vec_dstst((int signed *)p,1,1);
+ vec_dststt((int signed *)p,1,1);
+ vec_dstt((int signed *)p,1,1);
+ vec_dst((_100 *)p,1,1);
+ vec_dstst((_100 *)p,1,1);
+ vec_dststt((_100 *)p,1,1);
+ vec_dstt((_100 *)p,1,1);
+ vec_st(s32,1,(int signed *)p);
+ vec_st(s32,1,(_100 *)p);
+ vec_ste(s32,1,(int signed *)p);
+ vec_ste(s32,1,(_100 *)p);
+ vec_stl(s32,1,(int signed *)p);
+ vec_stl(s32,1,(_100 *)p);
+ u8 = vec_lvsl(1,(const volatile short int *)p);
+ u8 = vec_lvsl(1,(_101 *)p);
+ u8 = vec_lvsr(1,(const volatile short int *)p);
+ u8 = vec_lvsr(1,(_101 *)p);
+ u8 = vec_lvsl(1,(const short int *)p);
+ u8 = vec_lvsl(1,(_102 *)p);
+ u8 = vec_lvsr(1,(const short int *)p);
+ u8 = vec_lvsr(1,(_102 *)p);
+ s16 = vec_ld(1,(const short int *)p);
+ s16 = vec_ld(1,(_102 *)p);
+ s16 = vec_lde(1,(const short int *)p);
+ s16 = vec_lde(1,(_102 *)p);
+ s16 = vec_ldl(1,(const short int *)p);
+ s16 = vec_ldl(1,(_102 *)p);
+ vec_dst((const short int *)p,1,1);
+ vec_dstst((const short int *)p,1,1);
+ vec_dststt((const short int *)p,1,1);
+ vec_dstt((const short int *)p,1,1);
+ vec_dst((_102 *)p,1,1);
+ vec_dstst((_102 *)p,1,1);
+ vec_dststt((_102 *)p,1,1);
+ vec_dstt((_102 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile short int *)p);
+ u8 = vec_lvsl(1,(_103 *)p);
+ u8 = vec_lvsr(1,( volatile short int *)p);
+ u8 = vec_lvsr(1,(_103 *)p);
+ u8 = vec_lvsl(1,( short int *)p);
+ u8 = vec_lvsl(1,(_104 *)p);
+ u8 = vec_lvsr(1,( short int *)p);
+ u8 = vec_lvsr(1,(_104 *)p);
+ s16 = vec_ld(1,( short int *)p);
+ s16 = vec_ld(1,(_104 *)p);
+ s16 = vec_lde(1,( short int *)p);
+ s16 = vec_lde(1,(_104 *)p);
+ s16 = vec_ldl(1,( short int *)p);
+ s16 = vec_ldl(1,(_104 *)p);
+ vec_dst(( short int *)p,1,1);
+ vec_dstst(( short int *)p,1,1);
+ vec_dststt(( short int *)p,1,1);
+ vec_dstt(( short int *)p,1,1);
+ vec_dst((_104 *)p,1,1);
+ vec_dstst((_104 *)p,1,1);
+ vec_dststt((_104 *)p,1,1);
+ vec_dstt((_104 *)p,1,1);
+ vec_st(s16,1,( short int *)p);
+ vec_st(s16,1,(_104 *)p);
+ vec_ste(s16,1,( short int *)p);
+ vec_ste(s16,1,(_104 *)p);
+ vec_stl(s16,1,( short int *)p);
+ vec_stl(s16,1,(_104 *)p);
+ u8 = vec_lvsl(1,(const volatile int *)p);
+ u8 = vec_lvsl(1,(_105 *)p);
+ u8 = vec_lvsr(1,(const volatile int *)p);
+ u8 = vec_lvsr(1,(_105 *)p);
+ u8 = vec_lvsl(1,(const int *)p);
+ u8 = vec_lvsl(1,(_106 *)p);
+ u8 = vec_lvsr(1,(const int *)p);
+ u8 = vec_lvsr(1,(_106 *)p);
+ s32 = vec_ld(1,(const int *)p);
+ s32 = vec_ld(1,(_106 *)p);
+ s32 = vec_lde(1,(const int *)p);
+ s32 = vec_lde(1,(_106 *)p);
+ s32 = vec_ldl(1,(const int *)p);
+ s32 = vec_ldl(1,(_106 *)p);
+ vec_dst((const int *)p,1,1);
+ vec_dstst((const int *)p,1,1);
+ vec_dststt((const int *)p,1,1);
+ vec_dstt((const int *)p,1,1);
+ vec_dst((_106 *)p,1,1);
+ vec_dstst((_106 *)p,1,1);
+ vec_dststt((_106 *)p,1,1);
+ vec_dstt((_106 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile int *)p);
+ u8 = vec_lvsl(1,(_107 *)p);
+ u8 = vec_lvsr(1,( volatile int *)p);
+ u8 = vec_lvsr(1,(_107 *)p);
+ u8 = vec_lvsl(1,( int *)p);
+ u8 = vec_lvsl(1,(_108 *)p);
+ u8 = vec_lvsr(1,( int *)p);
+ u8 = vec_lvsr(1,(_108 *)p);
+ s32 = vec_ld(1,( int *)p);
+ s32 = vec_ld(1,(_108 *)p);
+ s32 = vec_lde(1,( int *)p);
+ s32 = vec_lde(1,(_108 *)p);
+ s32 = vec_ldl(1,( int *)p);
+ s32 = vec_ldl(1,(_108 *)p);
+ vec_dst(( int *)p,1,1);
+ vec_dstst(( int *)p,1,1);
+ vec_dststt(( int *)p,1,1);
+ vec_dstt(( int *)p,1,1);
+ vec_dst((_108 *)p,1,1);
+ vec_dstst((_108 *)p,1,1);
+ vec_dststt((_108 *)p,1,1);
+ vec_dstt((_108 *)p,1,1);
+ vec_st(s32,1,( int *)p);
+ vec_st(s32,1,(_108 *)p);
+ vec_ste(s32,1,( int *)p);
+ vec_ste(s32,1,(_108 *)p);
+ vec_stl(s32,1,( int *)p);
+ vec_stl(s32,1,(_108 *)p);
+ u8 = vec_lvsl(1,(const volatile int *)p);
+ u8 = vec_lvsl(1,(_109 *)p);
+ u8 = vec_lvsr(1,(const volatile int *)p);
+ u8 = vec_lvsr(1,(_109 *)p);
+ u8 = vec_lvsl(1,(const int *)p);
+ u8 = vec_lvsl(1,(_110 *)p);
+ u8 = vec_lvsr(1,(const int *)p);
+ u8 = vec_lvsr(1,(_110 *)p);
+ s32 = vec_ld(1,(const int *)p);
+ s32 = vec_ld(1,(_110 *)p);
+ s32 = vec_lde(1,(const int *)p);
+ s32 = vec_lde(1,(_110 *)p);
+ s32 = vec_ldl(1,(const int *)p);
+ s32 = vec_ldl(1,(_110 *)p);
+ vec_dst((const int *)p,1,1);
+ vec_dstst((const int *)p,1,1);
+ vec_dststt((const int *)p,1,1);
+ vec_dstt((const int *)p,1,1);
+ vec_dst((_110 *)p,1,1);
+ vec_dstst((_110 *)p,1,1);
+ vec_dststt((_110 *)p,1,1);
+ vec_dstt((_110 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile int *)p);
+ u8 = vec_lvsl(1,(_111 *)p);
+ u8 = vec_lvsr(1,( volatile int *)p);
+ u8 = vec_lvsr(1,(_111 *)p);
+ u8 = vec_lvsl(1,( int *)p);
+ u8 = vec_lvsl(1,(_112 *)p);
+ u8 = vec_lvsr(1,( int *)p);
+ u8 = vec_lvsr(1,(_112 *)p);
+ s32 = vec_ld(1,( int *)p);
+ s32 = vec_ld(1,(_112 *)p);
+ s32 = vec_lde(1,( int *)p);
+ s32 = vec_lde(1,(_112 *)p);
+ s32 = vec_ldl(1,( int *)p);
+ s32 = vec_ldl(1,(_112 *)p);
+ vec_dst(( int *)p,1,1);
+ vec_dstst(( int *)p,1,1);
+ vec_dststt(( int *)p,1,1);
+ vec_dstt(( int *)p,1,1);
+ vec_dst((_112 *)p,1,1);
+ vec_dstst((_112 *)p,1,1);
+ vec_dststt((_112 *)p,1,1);
+ vec_dstt((_112 *)p,1,1);
+ vec_st(s32,1,( int *)p);
+ vec_st(s32,1,(_112 *)p);
+ vec_ste(s32,1,( int *)p);
+ vec_ste(s32,1,(_112 *)p);
+ vec_stl(s32,1,( int *)p);
+ vec_stl(s32,1,(_112 *)p);
+ u8 = vec_ld(1,(const vector unsigned char *)p);
+ u8 = vec_ld(1,(_114 *)p);
+ u8 = vec_ldl(1,(const vector unsigned char *)p);
+ u8 = vec_ldl(1,(_114 *)p);
+ vec_dst((const vector unsigned char *)p,1,1);
+ vec_dstst((const vector unsigned char *)p,1,1);
+ vec_dststt((const vector unsigned char *)p,1,1);
+ vec_dstt((const vector unsigned char *)p,1,1);
+ vec_dst((_114 *)p,1,1);
+ vec_dstst((_114 *)p,1,1);
+ vec_dststt((_114 *)p,1,1);
+ vec_dstt((_114 *)p,1,1);
+ u8 = vec_ld(1,( vector unsigned char *)p);
+ u8 = vec_ld(1,(_116 *)p);
+ u8 = vec_ldl(1,( vector unsigned char *)p);
+ u8 = vec_ldl(1,(_116 *)p);
+ vec_dst(( vector unsigned char *)p,1,1);
+ vec_dstst(( vector unsigned char *)p,1,1);
+ vec_dststt(( vector unsigned char *)p,1,1);
+ vec_dstt(( vector unsigned char *)p,1,1);
+ vec_dst((_116 *)p,1,1);
+ vec_dstst((_116 *)p,1,1);
+ vec_dststt((_116 *)p,1,1);
+ vec_dstt((_116 *)p,1,1);
+ vec_st(u8,1,( vector unsigned char *)p);
+ vec_st(u8,1,(_116 *)p);
+ vec_stl(u8,1,( vector unsigned char *)p);
+ vec_stl(u8,1,(_116 *)p);
+ u8 = vec_lvsl(1,(const volatile signed char *)p);
+ u8 = vec_lvsl(1,(_117 *)p);
+ u8 = vec_lvsr(1,(const volatile signed char *)p);
+ u8 = vec_lvsr(1,(_117 *)p);
+ u8 = vec_lvsl(1,(const signed char *)p);
+ u8 = vec_lvsl(1,(_118 *)p);
+ u8 = vec_lvsr(1,(const signed char *)p);
+ u8 = vec_lvsr(1,(_118 *)p);
+ s8 = vec_ld(1,(const signed char *)p);
+ s8 = vec_ld(1,(_118 *)p);
+ s8 = vec_lde(1,(const signed char *)p);
+ s8 = vec_lde(1,(_118 *)p);
+ s8 = vec_ldl(1,(const signed char *)p);
+ s8 = vec_ldl(1,(_118 *)p);
+ vec_dst((const signed char *)p,1,1);
+ vec_dstst((const signed char *)p,1,1);
+ vec_dststt((const signed char *)p,1,1);
+ vec_dstt((const signed char *)p,1,1);
+ vec_dst((_118 *)p,1,1);
+ vec_dstst((_118 *)p,1,1);
+ vec_dststt((_118 *)p,1,1);
+ vec_dstt((_118 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile signed char *)p);
+ u8 = vec_lvsl(1,(_119 *)p);
+ u8 = vec_lvsr(1,( volatile signed char *)p);
+ u8 = vec_lvsr(1,(_119 *)p);
+ u8 = vec_lvsl(1,( signed char *)p);
+ u8 = vec_lvsl(1,(_120 *)p);
+ u8 = vec_lvsr(1,( signed char *)p);
+ u8 = vec_lvsr(1,(_120 *)p);
+ s8 = vec_ld(1,( signed char *)p);
+ s8 = vec_ld(1,(_120 *)p);
+ s8 = vec_lde(1,( signed char *)p);
+ s8 = vec_lde(1,(_120 *)p);
+ s8 = vec_ldl(1,( signed char *)p);
+ s8 = vec_ldl(1,(_120 *)p);
+ vec_dst(( signed char *)p,1,1);
+ vec_dstst(( signed char *)p,1,1);
+ vec_dststt(( signed char *)p,1,1);
+ vec_dstt(( signed char *)p,1,1);
+ vec_dst((_120 *)p,1,1);
+ vec_dstst((_120 *)p,1,1);
+ vec_dststt((_120 *)p,1,1);
+ vec_dstt((_120 *)p,1,1);
+ vec_st(s8,1,( signed char *)p);
+ vec_st(s8,1,(_120 *)p);
+ vec_ste(s8,1,( signed char *)p);
+ vec_ste(s8,1,(_120 *)p);
+ vec_stl(s8,1,( signed char *)p);
+ vec_stl(s8,1,(_120 *)p);
+ u8 = vec_lvsl(1,(const volatile float *)p);
+ u8 = vec_lvsl(1,(_121 *)p);
+ u8 = vec_lvsr(1,(const volatile float *)p);
+ u8 = vec_lvsr(1,(_121 *)p);
+ u8 = vec_lvsl(1,(const float *)p);
+ u8 = vec_lvsl(1,(_122 *)p);
+ u8 = vec_lvsr(1,(const float *)p);
+ u8 = vec_lvsr(1,(_122 *)p);
+ f32 = vec_ld(1,(const float *)p);
+ f32 = vec_ld(1,(_122 *)p);
+ f32 = vec_lde(1,(const float *)p);
+ f32 = vec_lde(1,(_122 *)p);
+ f32 = vec_ldl(1,(const float *)p);
+ f32 = vec_ldl(1,(_122 *)p);
+ vec_dst((const float *)p,1,1);
+ vec_dstst((const float *)p,1,1);
+ vec_dststt((const float *)p,1,1);
+ vec_dstt((const float *)p,1,1);
+ vec_dst((_122 *)p,1,1);
+ vec_dstst((_122 *)p,1,1);
+ vec_dststt((_122 *)p,1,1);
+ vec_dstt((_122 *)p,1,1);
+ u8 = vec_lvsl(1,( volatile float *)p);
+ u8 = vec_lvsl(1,(_123 *)p);
+ u8 = vec_lvsr(1,( volatile float *)p);
+ u8 = vec_lvsr(1,(_123 *)p);
+ u8 = vec_lvsl(1,( float *)p);
+ u8 = vec_lvsl(1,(_124 *)p);
+ u8 = vec_lvsr(1,( float *)p);
+ u8 = vec_lvsr(1,(_124 *)p);
+ f32 = vec_ld(1,( float *)p);
+ f32 = vec_ld(1,(_124 *)p);
+ f32 = vec_lde(1,( float *)p);
+ f32 = vec_lde(1,(_124 *)p);
+ f32 = vec_ldl(1,( float *)p);
+ f32 = vec_ldl(1,(_124 *)p);
+ vec_dst(( float *)p,1,1);
+ vec_dstst(( float *)p,1,1);
+ vec_dststt(( float *)p,1,1);
+ vec_dstt(( float *)p,1,1);
+ vec_dst((_124 *)p,1,1);
+ vec_dstst((_124 *)p,1,1);
+ vec_dststt((_124 *)p,1,1);
+ vec_dstt((_124 *)p,1,1);
+ vec_st(f32,1,( float *)p);
+ vec_st(f32,1,(_124 *)p);
+ vec_ste(f32,1,( float *)p);
+ vec_ste(f32,1,(_124 *)p);
+ vec_stl(f32,1,( float *)p);
+ vec_stl(f32,1,(_124 *)p);
+}
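
The wall of casts above only checks that every pointer flavor (const, volatile, typedef'd, scalar and vector element types) type-checks against the vec_ld/vec_st/vec_lvsl/vec_lvsr/vec_dst* overloads. In real code the permute vectors returned by vec_lvsl/vec_lvsr are what those overloads exist for; a minimal sketch of the classic misaligned-load idiom, with a hypothetical helper name (assumes a VMX target and that the enclosing aligned 32-byte window is readable):

#include <altivec.h>

/* Illustrative only, not part of the testsuite: fetch 16 bytes from a
   possibly misaligned address with two aligned loads and a permute.  */
static vector unsigned char
load_misaligned (const unsigned char *p)
{
  vector unsigned char perm = vec_lvsl (0, p);  /* shift amounts from p's low 4 bits */
  vector unsigned char lo   = vec_ld (0, p);    /* aligned block at or below p */
  vector unsigned char hi   = vec_ld (15, p);   /* following aligned block */
  return vec_perm (lo, hi, perm);               /* splice out the 16 wanted bytes */
}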
diff --git a/gcc/testsuite/gcc.dg/vmx/3c-02.c b/gcc/testsuite/gcc.dg/vmx/3c-02.c
new file mode 100644
index 0000000000..1d105dde40
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3c-02.c
@@ -0,0 +1,17 @@
+#include "harness.h"
+
+vector unsigned char u8;
+
+static void f(void *p)
+{
+ u8 = vec_ld(1, (unsigned char *)p);
+}
+
+static void test()
+{
+ static vector unsigned int value = {1,-2,3,-4};
+ static vector unsigned int buffer[2];
+ buffer[1] = value;
+ f((void *)(-1+(char*)(buffer+1)));
+ check(vec_all_eq((vector unsigned int) u8, value), "u8");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/3c-03.c b/gcc/testsuite/gcc.dg/vmx/3c-03.c
new file mode 100644
index 0000000000..2228c03575
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3c-03.c
@@ -0,0 +1,17 @@
+#include "harness.h"
+
+vector unsigned char u8;
+
+static void f(int i, void *p)
+{
+ u8 = vec_ld(i, (unsigned char *)p);
+}
+
+static void test()
+{
+ static vector unsigned int value = {1,-2,3,-4};
+ static vector unsigned int buffer[2];
+ buffer[1] = value;
+ f(37,(void *)(-37+(char*)(buffer+1)));
+ check(vec_all_eq((vector unsigned int) u8, value), "u8");
+}
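
3c-02.c and 3c-03.c rest on the same semantics: vec_ld adds its integer offset to the pointer, and the underlying lvx instruction clears the low four bits of that effective address. Passing a pointer 1 (or 37) bytes below buffer[1] together with a matching positive offset therefore still loads the aligned block holding value. A scalar model of the address computation, as a hedged sketch (lvx_effective_address is a hypothetical name):

/* What vec_ld(offset, p) actually addresses: the sum, rounded down to
   a 16-byte boundary.  In 3c-03.c the -37 and the 37 cancel, leaving
   exactly the aligned address of buffer[1].  */
static const void *
lvx_effective_address (int offset, const void *p)
{
  return (const void *) (((unsigned long) p + offset) & ~15UL);
}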
diff --git a/gcc/testsuite/gcc.dg/vmx/3d-01.c b/gcc/testsuite/gcc.dg/vmx/3d-01.c
new file mode 100644
index 0000000000..48657c8669
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/3d-01.c
@@ -0,0 +1,171 @@
+/* { dg-do compile } */
+#include <altivec.h>
+int i;
+
+void
+test_vec_all_gt(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_all_gt(u8a, u8b))
+ i = 1;
+}
+
+void
+test_vec_all_le(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_all_le(u8a, u8b))
+ i = 1;
+}
+
+void
+test_vec_any_gt(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_any_gt(u8a, u8b))
+ i = 1;
+}
+
+void
+test_vec_any_le(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_any_le(u8a, u8b))
+ i = 1;
+}
+
+void
+test_vec_all_lt(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_all_lt(u8a, u8b))
+ i = 1;
+}
+
+void
+test_vec_all_ge(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_all_ge(u8a, u8b))
+ i = 1;
+}
+
+void
+test_vec_any_lt(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_any_lt(u8a, u8b))
+ i = 1;
+}
+
+void
+test_vec_any_ge(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_any_ge(u8a, u8b))
+ i = 1;
+}
+
+void
+test_vec_all_eq(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_all_eq(u8a, u8b))
+ i = 1;
+}
+
+void
+test_vec_all_ne(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_all_ne(u8a, u8b))
+ i = 1;
+}
+
+void
+test_vec_any_eq(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_any_eq(u8a, u8b))
+ i = 1;
+}
+
+void
+test_vec_any_ne(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (vec_any_ne(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_all_gt(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_all_gt(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_all_le(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_all_le(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_any_gt(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_any_gt(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_any_le(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_any_le(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_all_lt(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_all_lt(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_all_ge(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_all_ge(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_any_lt(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_any_lt(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_any_ge(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_any_ge(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_all_eq(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_all_eq(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_all_ne(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_all_ne(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_any_eq(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_any_eq(u8a, u8b))
+ i = 1;
+}
+
+void
+test_not_vec_any_ne(vector unsigned char u8a, vector unsigned char u8b)
+{
+ if (!vec_any_ne(u8a, u8b))
+ i = 1;
+}
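
Each predicate above is expected to compile to a dot-form vector compare (vcmpgtub. and friends) followed by a branch on CR6, which is why the file tests both the plain and the !-negated form of every predicate: the two polarities take different branch conditions off the same compare. As a reference for the semantics, a scalar model of one all/any pair (illustrative only; note that vec_all_le(a,b) is exactly !vec_any_gt(a,b)):

#include <altivec.h>

/* Scalar reference model: "all" requires every element to satisfy the
   relation, "any" requires at least one.  */
static int
model_all_gt (vector unsigned char a, vector unsigned char b)
{
  union { vector unsigned char v; unsigned char e[16]; } x = { a }, y = { b };
  int i;
  for (i = 0; i < 16; i++)
    if (x.e[i] <= y.e[i])
      return 0;
  return 1;
}

static int
model_any_gt (vector unsigned char a, vector unsigned char b)
{
  union { vector unsigned char v; unsigned char e[16]; } x = { a }, y = { b };
  int i;
  for (i = 0; i < 16; i++)
    if (x.e[i] > y.e[i])
      return 1;
  return 0;
}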
diff --git a/gcc/testsuite/gcc.dg/vmx/4-01.c b/gcc/testsuite/gcc.dg/vmx/4-01.c
new file mode 100644
index 0000000000..f454a2edad
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/4-01.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector unsigned int
+f(vector signed char a)
+{
+ return (vector unsigned int)(a);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/4-03.c b/gcc/testsuite/gcc.dg/vmx/4-03.c
new file mode 100644
index 0000000000..1c3ccd7469
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/4-03.c
@@ -0,0 +1,7 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector unsigned int
+f(vector signed char a)
+{
+ return (vector unsigned int)a;
+}
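
4-01.c and 4-03.c differ only in whether the cast operand is parenthesized; both spellings must parse, and both are pure reinterpretations of the 128-bit value: no per-element conversion takes place, so the element values of the result depend on endianness. A minimal sketch that asserts no particular values:

#include <altivec.h>

vector signed char  a;  /* sixteen bytes */
vector unsigned int b;  /* the same sixteen bytes viewed as four words */

void
reinterpret (void)
{
  b = (vector unsigned int) a;   /* bit pattern preserved, elements re-sliced */
}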
diff --git a/gcc/testsuite/gcc.dg/vmx/5-01.c b/gcc/testsuite/gcc.dg/vmx/5-01.c
new file mode 100644
index 0000000000..c3cb6e990d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/5-01.c
@@ -0,0 +1,4 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector unsigned int a = {1,1,1,1};
+vector unsigned int b = {1,2,3,4};
diff --git a/gcc/testsuite/gcc.dg/vmx/5-02.c b/gcc/testsuite/gcc.dg/vmx/5-02.c
new file mode 100644
index 0000000000..0bb4eb0084
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/5-02.c
@@ -0,0 +1,4 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector float a = {1,1,1,1};
+vector float b = {1,2.0,3,4U};
diff --git a/gcc/testsuite/gcc.dg/vmx/5-03.c b/gcc/testsuite/gcc.dg/vmx/5-03.c
new file mode 100644
index 0000000000..3d048e87b0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/5-03.c
@@ -0,0 +1,28 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector unsigned char u8 = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
+vector signed char s8 = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
+vector bool char b8 = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
+vector unsigned short u16 = {1,1,1,1,1,1,1,1};
+vector signed short s16 = {1,1,1,1,1,1,1,1};
+vector bool short b16 = {1,1,1,1,1,1,1,1};
+vector unsigned int u32 = {1,1,1,1};
+vector signed int s32 = {1,1,1,1};
+vector bool int b32 = {1,1,1,1};
+vector float f32 = {1,1,1,1};
+vector pixel p16 = {1,1,1,1,1,1,1,1};
+
+vector unsigned char u8_
+ = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
+vector signed char s8_
+ = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
+vector bool char b8_
+ = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
+vector unsigned short u16_ = {1,2,3,4,5,6,7,8};
+vector signed short s16_ = {1,2,3,4,5,6,7,8};
+vector bool short b16_ = {1,2,3,4,5,6,7,8};
+vector unsigned int u32_ = {1,2,3,4};
+vector signed int s32_ = {1,2,3,4};
+vector bool int b32_ = {1,2,3,4};
+vector float f32_ = {1,2,3,4};
+vector pixel p16_ = {1,2,3,4,5,6,7,8};
diff --git a/gcc/testsuite/gcc.dg/vmx/5-04.c b/gcc/testsuite/gcc.dg/vmx/5-04.c
new file mode 100644
index 0000000000..c5135e8886
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/5-04.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector unsigned int a;
+vector unsigned int b;
+void
+f(void)
+{
+ a = ((vector unsigned int){1,1,1,1});
+ b = ((vector unsigned int){1,2,3,4});
+}
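
Unlike the file-scope initializers in 5-01.c through 5-03.c, 5-04.c builds its vectors from compound literals inside a function body, so the stores may be materialized at run time. Compound literals are full expressions and can sit anywhere a vector rvalue can; a minimal sketch assuming the usual altivec.h API (g is a hypothetical name):

#include <altivec.h>

vector unsigned int
g (void)
{
  /* Compound literals used directly as operands, not just as the
     right-hand side of an assignment (illustrative only).  */
  return vec_add (((vector unsigned int){1,1,1,1}),
                  ((vector unsigned int){1,2,3,4}));
}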
diff --git a/gcc/testsuite/gcc.dg/vmx/5-07t.c b/gcc/testsuite/gcc.dg/vmx/5-07t.c
new file mode 100644
index 0000000000..c03e6558f1
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/5-07t.c
@@ -0,0 +1,37 @@
+/* { dg-do compile } */
+#include <altivec.h>
+typedef vector unsigned char t_u8;
+typedef vector signed char t_s8;
+typedef vector bool char t_b8;
+typedef vector unsigned short t_u16;
+typedef vector signed short t_s16;
+typedef vector bool short t_b16;
+typedef vector unsigned int t_u32;
+typedef vector signed int t_s32;
+typedef vector bool int t_b32;
+typedef vector float t_f32;
+typedef vector pixel t_p16;
+
+t_u8 u8 = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
+t_s8 s8 = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
+t_b8 b8 = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
+t_u16 u16 = {1,1,1,1,1,1,1,1};
+t_s16 s16 = {1,1,1,1,1,1,1,1};
+t_b16 b16 = {1,1,1,1,1,1,1,1};
+t_u32 u32 = {1,1,1,1};
+t_s32 s32 = {1,1,1,1};
+t_b32 b32 = {1,1,1,1};
+t_f32 f32 = {1,1,1,1};
+t_p16 p16 = {1,1,1,1,1,1,1,1};
+
+t_u8 u8_ = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
+t_s8 s8_ = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
+t_b8 b8_ = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16};
+t_u16 u16_ = {1,2,3,4,5,6,7,8};
+t_s16 s16_ = {1,2,3,4,5,6,7,8};
+t_b16 b16_ = {1,2,3,4,5,6,7,8};
+t_u32 u32_ = {1,2,3,4};
+t_s32 s32_ = {1,2,3,4};
+t_b32 b32_ = {1,2,3,4};
+t_f32 f32_ = {1,2,3,4};
+t_p16 p16_ = {1,2,3,4,5,6,7,8};
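
The 5-10.c test that follows cycles through splat constants apparently chosen so that many of them can be synthesized in registers rather than loaded from the constant pool: splats of values in [-16,15] fit a single vspltisw, and the repeated byte and halfword patterns fit vspltisb/vspltish followed by merges or shifts. The intrinsic spelling of the simplest case, as a hedged sketch (all_neg16 is a hypothetical name):

#include <altivec.h>

/* vec_splat_s32 takes a 5-bit literal in [-16,15] and maps to one
   vspltisw; splat stores like {-16,-16,-16,-16} below can use it.  */
vector signed int
all_neg16 (void)
{
  return vec_splat_s32 (-16);
}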
diff --git a/gcc/testsuite/gcc.dg/vmx/5-10.c b/gcc/testsuite/gcc.dg/vmx/5-10.c
new file mode 100644
index 0000000000..5f1b6ddc1b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/5-10.c
@@ -0,0 +1,1352 @@
+/* { dg-do compile } */
+#include <altivec.h>
+void fu32a(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){0,0,0,0});
+ *u32++ = ((vector unsigned int){1,1,1,1});
+ *u32++ = ((vector unsigned int){2,2,2,2});
+ *u32++ = ((vector unsigned int){3,3,3,3});
+ *u32++ = ((vector unsigned int){4,4,4,4});
+ *u32++ = ((vector unsigned int){5,5,5,5});
+ *u32++ = ((vector unsigned int){6,6,6,6});
+ *u32++ = ((vector unsigned int){7,7,7,7});
+ *u32++ = ((vector unsigned int){8,8,8,8});
+ *u32++ = ((vector unsigned int){9,9,9,9});
+ *u32++ = ((vector unsigned int){10,10,10,10});
+ *u32++ = ((vector unsigned int){11,11,11,11});
+ *u32++ = ((vector unsigned int){12,12,12,12});
+ *u32++ = ((vector unsigned int){13,13,13,13});
+ *u32++ = ((vector unsigned int){14,14,14,14});
+ *u32++ = ((vector unsigned int){15,15,15,15});
+}
+void fu32b(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){65537,65537,65537,65537});
+ *u32++ = ((vector unsigned int){131074,131074,131074,131074});
+ *u32++ = ((vector unsigned int){196611,196611,196611,196611});
+ *u32++ = ((vector unsigned int){262148,262148,262148,262148});
+ *u32++ = ((vector unsigned int){327685,327685,327685,327685});
+ *u32++ = ((vector unsigned int){393222,393222,393222,393222});
+ *u32++ = ((vector unsigned int){458759,458759,458759,458759});
+ *u32++ = ((vector unsigned int){524296,524296,524296,524296});
+ *u32++ = ((vector unsigned int){589833,589833,589833,589833});
+ *u32++ = ((vector unsigned int){655370,655370,655370,655370});
+ *u32++ = ((vector unsigned int){720907,720907,720907,720907});
+ *u32++ = ((vector unsigned int){786444,786444,786444,786444});
+ *u32++ = ((vector unsigned int){851981,851981,851981,851981});
+ *u32++ = ((vector unsigned int){917518,917518,917518,917518});
+ *u32++ = ((vector unsigned int){983055,983055,983055,983055});
+}
+void fu32c(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){16843009,16843009,16843009,16843009});
+ *u32++ = ((vector unsigned int){33686018,33686018,33686018,33686018});
+ *u32++ = ((vector unsigned int){50529027,50529027,50529027,50529027});
+ *u32++ = ((vector unsigned int){67372036,67372036,67372036,67372036});
+ *u32++ = ((vector unsigned int){84215045,84215045,84215045,84215045});
+ *u32++ = ((vector unsigned int){101058054,101058054,101058054,101058054});
+ *u32++ = ((vector unsigned int){117901063,117901063,117901063,117901063});
+ *u32++ = ((vector unsigned int){134744072,134744072,134744072,134744072});
+ *u32++ = ((vector unsigned int){151587081,151587081,151587081,151587081});
+ *u32++ = ((vector unsigned int){168430090,168430090,168430090,168430090});
+ *u32++ = ((vector unsigned int){185273099,185273099,185273099,185273099});
+ *u32++ = ((vector unsigned int){202116108,202116108,202116108,202116108});
+ *u32++ = ((vector unsigned int){218959117,218959117,218959117,218959117});
+ *u32++ = ((vector unsigned int){235802126,235802126,235802126,235802126});
+ *u32++ = ((vector unsigned int){252645135,252645135,252645135,252645135});
+}
+void fu32d(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){4042322160,4042322160,4042322160,4042322160});
+ *u32++ = ((vector unsigned int){4059165169,4059165169,4059165169,4059165169});
+ *u32++ = ((vector unsigned int){4076008178,4076008178,4076008178,4076008178});
+ *u32++ = ((vector unsigned int){4092851187,4092851187,4092851187,4092851187});
+ *u32++ = ((vector unsigned int){4109694196,4109694196,4109694196,4109694196});
+ *u32++ = ((vector unsigned int){4126537205,4126537205,4126537205,4126537205});
+ *u32++ = ((vector unsigned int){4143380214,4143380214,4143380214,4143380214});
+ *u32++ = ((vector unsigned int){4160223223,4160223223,4160223223,4160223223});
+ *u32++ = ((vector unsigned int){4177066232,4177066232,4177066232,4177066232});
+ *u32++ = ((vector unsigned int){4193909241,4193909241,4193909241,4193909241});
+ *u32++ = ((vector unsigned int){4210752250,4210752250,4210752250,4210752250});
+ *u32++ = ((vector unsigned int){4227595259,4227595259,4227595259,4227595259});
+ *u32++ = ((vector unsigned int){4244438268,4244438268,4244438268,4244438268});
+ *u32++ = ((vector unsigned int){4261281277,4261281277,4261281277,4261281277});
+ *u32++ = ((vector unsigned int){4278124286,4278124286,4278124286,4278124286});
+}
+void fu32e(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){4293984240,4293984240,4293984240,4293984240});
+ *u32++ = ((vector unsigned int){4294049777,4294049777,4294049777,4294049777});
+ *u32++ = ((vector unsigned int){4294115314,4294115314,4294115314,4294115314});
+ *u32++ = ((vector unsigned int){4294180851,4294180851,4294180851,4294180851});
+ *u32++ = ((vector unsigned int){4294246388,4294246388,4294246388,4294246388});
+ *u32++ = ((vector unsigned int){4294311925,4294311925,4294311925,4294311925});
+ *u32++ = ((vector unsigned int){4294377462,4294377462,4294377462,4294377462});
+ *u32++ = ((vector unsigned int){4294442999,4294442999,4294442999,4294442999});
+ *u32++ = ((vector unsigned int){4294508536,4294508536,4294508536,4294508536});
+ *u32++ = ((vector unsigned int){4294574073,4294574073,4294574073,4294574073});
+ *u32++ = ((vector unsigned int){4294639610,4294639610,4294639610,4294639610});
+ *u32++ = ((vector unsigned int){4294705147,4294705147,4294705147,4294705147});
+ *u32++ = ((vector unsigned int){4294770684,4294770684,4294770684,4294770684});
+ *u32++ = ((vector unsigned int){4294836221,4294836221,4294836221,4294836221});
+ *u32++ = ((vector unsigned int){4294901758,4294901758,4294901758,4294901758});
+}
+void fu32f(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){4294967280,4294967280,4294967280,4294967280});
+ *u32++ = ((vector unsigned int){4294967281,4294967281,4294967281,4294967281});
+ *u32++ = ((vector unsigned int){4294967282,4294967282,4294967282,4294967282});
+ *u32++ = ((vector unsigned int){4294967283,4294967283,4294967283,4294967283});
+ *u32++ = ((vector unsigned int){4294967284,4294967284,4294967284,4294967284});
+ *u32++ = ((vector unsigned int){4294967285,4294967285,4294967285,4294967285});
+ *u32++ = ((vector unsigned int){4294967286,4294967286,4294967286,4294967286});
+ *u32++ = ((vector unsigned int){4294967287,4294967287,4294967287,4294967287});
+ *u32++ = ((vector unsigned int){4294967288,4294967288,4294967288,4294967288});
+ *u32++ = ((vector unsigned int){4294967289,4294967289,4294967289,4294967289});
+ *u32++ = ((vector unsigned int){4294967290,4294967290,4294967290,4294967290});
+ *u32++ = ((vector unsigned int){4294967291,4294967291,4294967291,4294967291});
+ *u32++ = ((vector unsigned int){4294967292,4294967292,4294967292,4294967292});
+ *u32++ = ((vector unsigned int){4294967293,4294967293,4294967293,4294967293});
+ *u32++ = ((vector unsigned int){4294967294,4294967294,4294967294,4294967294});
+ *u32++ = ((vector unsigned int){4294967295,4294967295,4294967295,4294967295});
+}
+void fu32g(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){-252645136,-252645136,-252645136,-252645136});
+ *u32++ = ((vector unsigned int){-235802127,-235802127,-235802127,-235802127});
+ *u32++ = ((vector unsigned int){-218959118,-218959118,-218959118,-218959118});
+ *u32++ = ((vector unsigned int){-202116109,-202116109,-202116109,-202116109});
+ *u32++ = ((vector unsigned int){-185273100,-185273100,-185273100,-185273100});
+ *u32++ = ((vector unsigned int){-168430091,-168430091,-168430091,-168430091});
+ *u32++ = ((vector unsigned int){-151587082,-151587082,-151587082,-151587082});
+ *u32++ = ((vector unsigned int){-134744073,-134744073,-134744073,-134744073});
+ *u32++ = ((vector unsigned int){-117901064,-117901064,-117901064,-117901064});
+ *u32++ = ((vector unsigned int){-101058055,-101058055,-101058055,-101058055});
+ *u32++ = ((vector unsigned int){-84215046,-84215046,-84215046,-84215046});
+ *u32++ = ((vector unsigned int){-67372037,-67372037,-67372037,-67372037});
+ *u32++ = ((vector unsigned int){-50529028,-50529028,-50529028,-50529028});
+ *u32++ = ((vector unsigned int){-33686019,-33686019,-33686019,-33686019});
+ *u32++ = ((vector unsigned int){-16843010,-16843010,-16843010,-16843010});
+}
+void fu32h(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){-983056,-983056,-983056,-983056});
+ *u32++ = ((vector unsigned int){-917519,-917519,-917519,-917519});
+ *u32++ = ((vector unsigned int){-851982,-851982,-851982,-851982});
+ *u32++ = ((vector unsigned int){-786445,-786445,-786445,-786445});
+ *u32++ = ((vector unsigned int){-720908,-720908,-720908,-720908});
+ *u32++ = ((vector unsigned int){-655371,-655371,-655371,-655371});
+ *u32++ = ((vector unsigned int){-589834,-589834,-589834,-589834});
+ *u32++ = ((vector unsigned int){-524297,-524297,-524297,-524297});
+ *u32++ = ((vector unsigned int){-458760,-458760,-458760,-458760});
+ *u32++ = ((vector unsigned int){-393223,-393223,-393223,-393223});
+ *u32++ = ((vector unsigned int){-327686,-327686,-327686,-327686});
+ *u32++ = ((vector unsigned int){-262149,-262149,-262149,-262149});
+ *u32++ = ((vector unsigned int){-196612,-196612,-196612,-196612});
+ *u32++ = ((vector unsigned int){-131075,-131075,-131075,-131075});
+ *u32++ = ((vector unsigned int){-65538,-65538,-65538,-65538});
+}
+void fu32i(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){-16,-16,-16,-16});
+ *u32++ = ((vector unsigned int){-15,-15,-15,-15});
+ *u32++ = ((vector unsigned int){-14,-14,-14,-14});
+ *u32++ = ((vector unsigned int){-13,-13,-13,-13});
+ *u32++ = ((vector unsigned int){-12,-12,-12,-12});
+ *u32++ = ((vector unsigned int){-11,-11,-11,-11});
+ *u32++ = ((vector unsigned int){-10,-10,-10,-10});
+ *u32++ = ((vector unsigned int){-9,-9,-9,-9});
+ *u32++ = ((vector unsigned int){-8,-8,-8,-8});
+ *u32++ = ((vector unsigned int){-7,-7,-7,-7});
+ *u32++ = ((vector unsigned int){-6,-6,-6,-6});
+ *u32++ = ((vector unsigned int){-5,-5,-5,-5});
+ *u32++ = ((vector unsigned int){-4,-4,-4,-4});
+ *u32++ = ((vector unsigned int){-3,-3,-3,-3});
+ *u32++ = ((vector unsigned int){-2,-2,-2,-2});
+ *u32++ = ((vector unsigned int){-1,-1,-1,-1});
+}
+void fu32j(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){0xfffffff0,0xfffffff0,0xfffffff0,0xfffffff0});
+ *u32++ = ((vector unsigned int){0xfffffff1,0xfffffff1,0xfffffff1,0xfffffff1});
+ *u32++ = ((vector unsigned int){0xfffffff2,0xfffffff2,0xfffffff2,0xfffffff2});
+ *u32++ = ((vector unsigned int){0xfffffff3,0xfffffff3,0xfffffff3,0xfffffff3});
+ *u32++ = ((vector unsigned int){0xfffffff4,0xfffffff4,0xfffffff4,0xfffffff4});
+ *u32++ = ((vector unsigned int){0xfffffff5,0xfffffff5,0xfffffff5,0xfffffff5});
+ *u32++ = ((vector unsigned int){0xfffffff6,0xfffffff6,0xfffffff6,0xfffffff6});
+ *u32++ = ((vector unsigned int){0xfffffff7,0xfffffff7,0xfffffff7,0xfffffff7});
+ *u32++ = ((vector unsigned int){0xfffffff8,0xfffffff8,0xfffffff8,0xfffffff8});
+ *u32++ = ((vector unsigned int){0xfffffff9,0xfffffff9,0xfffffff9,0xfffffff9});
+ *u32++ = ((vector unsigned int){0xfffffffa,0xfffffffa,0xfffffffa,0xfffffffa});
+ *u32++ = ((vector unsigned int){0xfffffffb,0xfffffffb,0xfffffffb,0xfffffffb});
+ *u32++ = ((vector unsigned int){0xfffffffc,0xfffffffc,0xfffffffc,0xfffffffc});
+ *u32++ = ((vector unsigned int){0xfffffffd,0xfffffffd,0xfffffffd,0xfffffffd});
+ *u32++ = ((vector unsigned int){0xfffffffe,0xfffffffe,0xfffffffe,0xfffffffe});
+ *u32++ = ((vector unsigned int){0xffffffff,0xffffffff,0xffffffff,0xffffffff});
+}
+void fu32k(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){0x00000000,0x00000000,0x00000000,0x00000000});
+ *u32++ = ((vector unsigned int){0x00000001,0x00000001,0x00000001,0x00000001});
+ *u32++ = ((vector unsigned int){0x00000002,0x00000002,0x00000002,0x00000002});
+ *u32++ = ((vector unsigned int){0x00000003,0x00000003,0x00000003,0x00000003});
+ *u32++ = ((vector unsigned int){0x00000004,0x00000004,0x00000004,0x00000004});
+ *u32++ = ((vector unsigned int){0x00000005,0x00000005,0x00000005,0x00000005});
+ *u32++ = ((vector unsigned int){0x00000006,0x00000006,0x00000006,0x00000006});
+ *u32++ = ((vector unsigned int){0x00000007,0x00000007,0x00000007,0x00000007});
+ *u32++ = ((vector unsigned int){0x00000008,0x00000008,0x00000008,0x00000008});
+ *u32++ = ((vector unsigned int){0x00000009,0x00000009,0x00000009,0x00000009});
+ *u32++ = ((vector unsigned int){0x0000000a,0x0000000a,0x0000000a,0x0000000a});
+ *u32++ = ((vector unsigned int){0x0000000b,0x0000000b,0x0000000b,0x0000000b});
+ *u32++ = ((vector unsigned int){0x0000000c,0x0000000c,0x0000000c,0x0000000c});
+ *u32++ = ((vector unsigned int){0x0000000d,0x0000000d,0x0000000d,0x0000000d});
+ *u32++ = ((vector unsigned int){0x0000000e,0x0000000e,0x0000000e,0x0000000e});
+ *u32++ = ((vector unsigned int){0x0000000f,0x0000000f,0x0000000f,0x0000000f});
+}
+void fu32l(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){0xfff0fff0,0xfff0fff0,0xfff0fff0,0xfff0fff0});
+ *u32++ = ((vector unsigned int){0xfff1fff1,0xfff1fff1,0xfff1fff1,0xfff1fff1});
+ *u32++ = ((vector unsigned int){0xfff2fff2,0xfff2fff2,0xfff2fff2,0xfff2fff2});
+ *u32++ = ((vector unsigned int){0xfff3fff3,0xfff3fff3,0xfff3fff3,0xfff3fff3});
+ *u32++ = ((vector unsigned int){0xfff4fff4,0xfff4fff4,0xfff4fff4,0xfff4fff4});
+ *u32++ = ((vector unsigned int){0xfff5fff5,0xfff5fff5,0xfff5fff5,0xfff5fff5});
+ *u32++ = ((vector unsigned int){0xfff6fff6,0xfff6fff6,0xfff6fff6,0xfff6fff6});
+ *u32++ = ((vector unsigned int){0xfff7fff7,0xfff7fff7,0xfff7fff7,0xfff7fff7});
+ *u32++ = ((vector unsigned int){0xfff8fff8,0xfff8fff8,0xfff8fff8,0xfff8fff8});
+ *u32++ = ((vector unsigned int){0xfff9fff9,0xfff9fff9,0xfff9fff9,0xfff9fff9});
+ *u32++ = ((vector unsigned int){0xfffafffa,0xfffafffa,0xfffafffa,0xfffafffa});
+ *u32++ = ((vector unsigned int){0xfffbfffb,0xfffbfffb,0xfffbfffb,0xfffbfffb});
+ *u32++ = ((vector unsigned int){0xfffcfffc,0xfffcfffc,0xfffcfffc,0xfffcfffc});
+ *u32++ = ((vector unsigned int){0xfffdfffd,0xfffdfffd,0xfffdfffd,0xfffdfffd});
+ *u32++ = ((vector unsigned int){0xfffefffe,0xfffefffe,0xfffefffe,0xfffefffe});
+ *u32++ = ((vector unsigned int){0xffffffff,0xffffffff,0xffffffff,0xffffffff});
+}
+void fu32m(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){0x00000000,0x00000000,0x00000000,0x00000000});
+ *u32++ = ((vector unsigned int){0x00010001,0x00010001,0x00010001,0x00010001});
+ *u32++ = ((vector unsigned int){0x00020002,0x00020002,0x00020002,0x00020002});
+ *u32++ = ((vector unsigned int){0x00030003,0x00030003,0x00030003,0x00030003});
+ *u32++ = ((vector unsigned int){0x00040004,0x00040004,0x00040004,0x00040004});
+ *u32++ = ((vector unsigned int){0x00050005,0x00050005,0x00050005,0x00050005});
+ *u32++ = ((vector unsigned int){0x00060006,0x00060006,0x00060006,0x00060006});
+ *u32++ = ((vector unsigned int){0x00070007,0x00070007,0x00070007,0x00070007});
+ *u32++ = ((vector unsigned int){0x00080008,0x00080008,0x00080008,0x00080008});
+ *u32++ = ((vector unsigned int){0x00090009,0x00090009,0x00090009,0x00090009});
+ *u32++ = ((vector unsigned int){0x000a000a,0x000a000a,0x000a000a,0x000a000a});
+ *u32++ = ((vector unsigned int){0x000b000b,0x000b000b,0x000b000b,0x000b000b});
+ *u32++ = ((vector unsigned int){0x000c000c,0x000c000c,0x000c000c,0x000c000c});
+ *u32++ = ((vector unsigned int){0x000d000d,0x000d000d,0x000d000d,0x000d000d});
+ *u32++ = ((vector unsigned int){0x000e000e,0x000e000e,0x000e000e,0x000e000e});
+ *u32++ = ((vector unsigned int){0x000f000f,0x000f000f,0x000f000f,0x000f000f});
+}
+void fu32n(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){0xf0f0f0f0,0xf0f0f0f0,0xf0f0f0f0,0xf0f0f0f0});
+ *u32++ = ((vector unsigned int){0xf1f1f1f1,0xf1f1f1f1,0xf1f1f1f1,0xf1f1f1f1});
+ *u32++ = ((vector unsigned int){0xf2f2f2f2,0xf2f2f2f2,0xf2f2f2f2,0xf2f2f2f2});
+ *u32++ = ((vector unsigned int){0xf3f3f3f3,0xf3f3f3f3,0xf3f3f3f3,0xf3f3f3f3});
+ *u32++ = ((vector unsigned int){0xf4f4f4f4,0xf4f4f4f4,0xf4f4f4f4,0xf4f4f4f4});
+ *u32++ = ((vector unsigned int){0xf5f5f5f5,0xf5f5f5f5,0xf5f5f5f5,0xf5f5f5f5});
+ *u32++ = ((vector unsigned int){0xf6f6f6f6,0xf6f6f6f6,0xf6f6f6f6,0xf6f6f6f6});
+ *u32++ = ((vector unsigned int){0xf7f7f7f7,0xf7f7f7f7,0xf7f7f7f7,0xf7f7f7f7});
+ *u32++ = ((vector unsigned int){0xf8f8f8f8,0xf8f8f8f8,0xf8f8f8f8,0xf8f8f8f8});
+ *u32++ = ((vector unsigned int){0xf9f9f9f9,0xf9f9f9f9,0xf9f9f9f9,0xf9f9f9f9});
+ *u32++ = ((vector unsigned int){0xfafafafa,0xfafafafa,0xfafafafa,0xfafafafa});
+ *u32++ = ((vector unsigned int){0xfbfbfbfb,0xfbfbfbfb,0xfbfbfbfb,0xfbfbfbfb});
+ *u32++ = ((vector unsigned int){0xfcfcfcfc,0xfcfcfcfc,0xfcfcfcfc,0xfcfcfcfc});
+ *u32++ = ((vector unsigned int){0xfdfdfdfd,0xfdfdfdfd,0xfdfdfdfd,0xfdfdfdfd});
+ *u32++ = ((vector unsigned int){0xfefefefe,0xfefefefe,0xfefefefe,0xfefefefe});
+ *u32++ = ((vector unsigned int){0xffffffff,0xffffffff,0xffffffff,0xffffffff});
+}
+void fu32o(vector unsigned int *u32)
+{
+ *u32++ = ((vector unsigned int){0x00000000,0x00000000,0x00000000,0x00000000});
+ *u32++ = ((vector unsigned int){0x01010101,0x01010101,0x01010101,0x01010101});
+ *u32++ = ((vector unsigned int){0x02020202,0x02020202,0x02020202,0x02020202});
+ *u32++ = ((vector unsigned int){0x03030303,0x03030303,0x03030303,0x03030303});
+ *u32++ = ((vector unsigned int){0x04040404,0x04040404,0x04040404,0x04040404});
+ *u32++ = ((vector unsigned int){0x05050505,0x05050505,0x05050505,0x05050505});
+ *u32++ = ((vector unsigned int){0x06060606,0x06060606,0x06060606,0x06060606});
+ *u32++ = ((vector unsigned int){0x07070707,0x07070707,0x07070707,0x07070707});
+ *u32++ = ((vector unsigned int){0x08080808,0x08080808,0x08080808,0x08080808});
+ *u32++ = ((vector unsigned int){0x09090909,0x09090909,0x09090909,0x09090909});
+ *u32++ = ((vector unsigned int){0x0a0a0a0a,0x0a0a0a0a,0x0a0a0a0a,0x0a0a0a0a});
+ *u32++ = ((vector unsigned int){0x0b0b0b0b,0x0b0b0b0b,0x0b0b0b0b,0x0b0b0b0b});
+ *u32++ = ((vector unsigned int){0x0c0c0c0c,0x0c0c0c0c,0x0c0c0c0c,0x0c0c0c0c});
+ *u32++ = ((vector unsigned int){0x0d0d0d0d,0x0d0d0d0d,0x0d0d0d0d,0x0d0d0d0d});
+ *u32++ = ((vector unsigned int){0x0e0e0e0e,0x0e0e0e0e,0x0e0e0e0e,0x0e0e0e0e});
+ *u32++ = ((vector unsigned int){0x0f0f0f0f,0x0f0f0f0f,0x0f0f0f0f,0x0f0f0f0f});
+}
+void fu16a(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){0xffff,0xfff0,0xffff,0xfff0,0xffff,0xfff0,0xffff,0xfff0});
+ *u16++ = ((vector unsigned short){0xffff,0xfff1,0xffff,0xfff1,0xffff,0xfff1,0xffff,0xfff1});
+ *u16++ = ((vector unsigned short){0xffff,0xfff2,0xffff,0xfff2,0xffff,0xfff2,0xffff,0xfff2});
+ *u16++ = ((vector unsigned short){0xffff,0xfff3,0xffff,0xfff3,0xffff,0xfff3,0xffff,0xfff3});
+ *u16++ = ((vector unsigned short){0xffff,0xfff4,0xffff,0xfff4,0xffff,0xfff4,0xffff,0xfff4});
+ *u16++ = ((vector unsigned short){0xffff,0xfff5,0xffff,0xfff5,0xffff,0xfff5,0xffff,0xfff5});
+ *u16++ = ((vector unsigned short){0xffff,0xfff6,0xffff,0xfff6,0xffff,0xfff6,0xffff,0xfff6});
+ *u16++ = ((vector unsigned short){0xffff,0xfff7,0xffff,0xfff7,0xffff,0xfff7,0xffff,0xfff7});
+ *u16++ = ((vector unsigned short){0xffff,0xfff8,0xffff,0xfff8,0xffff,0xfff8,0xffff,0xfff8});
+ *u16++ = ((vector unsigned short){0xffff,0xfff9,0xffff,0xfff9,0xffff,0xfff9,0xffff,0xfff9});
+ *u16++ = ((vector unsigned short){0xffff,0xfffa,0xffff,0xfffa,0xffff,0xfffa,0xffff,0xfffa});
+ *u16++ = ((vector unsigned short){0xffff,0xfffb,0xffff,0xfffb,0xffff,0xfffb,0xffff,0xfffb});
+ *u16++ = ((vector unsigned short){0xffff,0xfffc,0xffff,0xfffc,0xffff,0xfffc,0xffff,0xfffc});
+ *u16++ = ((vector unsigned short){0xffff,0xfffd,0xffff,0xfffd,0xffff,0xfffd,0xffff,0xfffd});
+ *u16++ = ((vector unsigned short){0xffff,0xfffe,0xffff,0xfffe,0xffff,0xfffe,0xffff,0xfffe});
+ *u16++ = ((vector unsigned short){0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff});
+}
+void fu16b(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000});
+ *u16++ = ((vector unsigned short){0x0000,0x0001,0x0000,0x0001,0x0000,0x0001,0x0000,0x0001});
+ *u16++ = ((vector unsigned short){0x0000,0x0002,0x0000,0x0002,0x0000,0x0002,0x0000,0x0002});
+ *u16++ = ((vector unsigned short){0x0000,0x0003,0x0000,0x0003,0x0000,0x0003,0x0000,0x0003});
+ *u16++ = ((vector unsigned short){0x0000,0x0004,0x0000,0x0004,0x0000,0x0004,0x0000,0x0004});
+ *u16++ = ((vector unsigned short){0x0000,0x0005,0x0000,0x0005,0x0000,0x0005,0x0000,0x0005});
+ *u16++ = ((vector unsigned short){0x0000,0x0006,0x0000,0x0006,0x0000,0x0006,0x0000,0x0006});
+ *u16++ = ((vector unsigned short){0x0000,0x0007,0x0000,0x0007,0x0000,0x0007,0x0000,0x0007});
+ *u16++ = ((vector unsigned short){0x0000,0x0008,0x0000,0x0008,0x0000,0x0008,0x0000,0x0008});
+ *u16++ = ((vector unsigned short){0x0000,0x0009,0x0000,0x0009,0x0000,0x0009,0x0000,0x0009});
+ *u16++ = ((vector unsigned short){0x0000,0x000a,0x0000,0x000a,0x0000,0x000a,0x0000,0x000a});
+ *u16++ = ((vector unsigned short){0x0000,0x000b,0x0000,0x000b,0x0000,0x000b,0x0000,0x000b});
+ *u16++ = ((vector unsigned short){0x0000,0x000c,0x0000,0x000c,0x0000,0x000c,0x0000,0x000c});
+ *u16++ = ((vector unsigned short){0x0000,0x000d,0x0000,0x000d,0x0000,0x000d,0x0000,0x000d});
+ *u16++ = ((vector unsigned short){0x0000,0x000e,0x0000,0x000e,0x0000,0x000e,0x0000,0x000e});
+ *u16++ = ((vector unsigned short){0x0000,0x000f,0x0000,0x000f,0x0000,0x000f,0x0000,0x000f});
+}
+void fu16c(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){0xfff0,0xfff0,0xfff0,0xfff0,0xfff0,0xfff0,0xfff0,0xfff0});
+ *u16++ = ((vector unsigned short){0xfff1,0xfff1,0xfff1,0xfff1,0xfff1,0xfff1,0xfff1,0xfff1});
+ *u16++ = ((vector unsigned short){0xfff2,0xfff2,0xfff2,0xfff2,0xfff2,0xfff2,0xfff2,0xfff2});
+ *u16++ = ((vector unsigned short){0xfff3,0xfff3,0xfff3,0xfff3,0xfff3,0xfff3,0xfff3,0xfff3});
+ *u16++ = ((vector unsigned short){0xfff4,0xfff4,0xfff4,0xfff4,0xfff4,0xfff4,0xfff4,0xfff4});
+ *u16++ = ((vector unsigned short){0xfff5,0xfff5,0xfff5,0xfff5,0xfff5,0xfff5,0xfff5,0xfff5});
+ *u16++ = ((vector unsigned short){0xfff6,0xfff6,0xfff6,0xfff6,0xfff6,0xfff6,0xfff6,0xfff6});
+ *u16++ = ((vector unsigned short){0xfff7,0xfff7,0xfff7,0xfff7,0xfff7,0xfff7,0xfff7,0xfff7});
+ *u16++ = ((vector unsigned short){0xfff8,0xfff8,0xfff8,0xfff8,0xfff8,0xfff8,0xfff8,0xfff8});
+ *u16++ = ((vector unsigned short){0xfff9,0xfff9,0xfff9,0xfff9,0xfff9,0xfff9,0xfff9,0xfff9});
+ *u16++ = ((vector unsigned short){0xfffa,0xfffa,0xfffa,0xfffa,0xfffa,0xfffa,0xfffa,0xfffa});
+ *u16++ = ((vector unsigned short){0xfffb,0xfffb,0xfffb,0xfffb,0xfffb,0xfffb,0xfffb,0xfffb});
+ *u16++ = ((vector unsigned short){0xfffc,0xfffc,0xfffc,0xfffc,0xfffc,0xfffc,0xfffc,0xfffc});
+ *u16++ = ((vector unsigned short){0xfffd,0xfffd,0xfffd,0xfffd,0xfffd,0xfffd,0xfffd,0xfffd});
+ *u16++ = ((vector unsigned short){0xfffe,0xfffe,0xfffe,0xfffe,0xfffe,0xfffe,0xfffe,0xfffe});
+ *u16++ = ((vector unsigned short){0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff});
+}
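+/* fu16d: halfword splats 0x0000 through 0x000f.  */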
+void fu16d(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000});
+ *u16++ = ((vector unsigned short){0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001});
+ *u16++ = ((vector unsigned short){0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,0x0002});
+ *u16++ = ((vector unsigned short){0x0003,0x0003,0x0003,0x0003,0x0003,0x0003,0x0003,0x0003});
+ *u16++ = ((vector unsigned short){0x0004,0x0004,0x0004,0x0004,0x0004,0x0004,0x0004,0x0004});
+ *u16++ = ((vector unsigned short){0x0005,0x0005,0x0005,0x0005,0x0005,0x0005,0x0005,0x0005});
+ *u16++ = ((vector unsigned short){0x0006,0x0006,0x0006,0x0006,0x0006,0x0006,0x0006,0x0006});
+ *u16++ = ((vector unsigned short){0x0007,0x0007,0x0007,0x0007,0x0007,0x0007,0x0007,0x0007});
+ *u16++ = ((vector unsigned short){0x0008,0x0008,0x0008,0x0008,0x0008,0x0008,0x0008,0x0008});
+ *u16++ = ((vector unsigned short){0x0009,0x0009,0x0009,0x0009,0x0009,0x0009,0x0009,0x0009});
+ *u16++ = ((vector unsigned short){0x000a,0x000a,0x000a,0x000a,0x000a,0x000a,0x000a,0x000a});
+ *u16++ = ((vector unsigned short){0x000b,0x000b,0x000b,0x000b,0x000b,0x000b,0x000b,0x000b});
+ *u16++ = ((vector unsigned short){0x000c,0x000c,0x000c,0x000c,0x000c,0x000c,0x000c,0x000c});
+ *u16++ = ((vector unsigned short){0x000d,0x000d,0x000d,0x000d,0x000d,0x000d,0x000d,0x000d});
+ *u16++ = ((vector unsigned short){0x000e,0x000e,0x000e,0x000e,0x000e,0x000e,0x000e,0x000e});
+ *u16++ = ((vector unsigned short){0x000f,0x000f,0x000f,0x000f,0x000f,0x000f,0x000f,0x000f});
+}
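+/* fu16e: halfword splats whose two bytes match, 0xf0f0 through 0xffff.  */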
+void fu16e(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){0xf0f0,0xf0f0,0xf0f0,0xf0f0,0xf0f0,0xf0f0,0xf0f0,0xf0f0});
+ *u16++ = ((vector unsigned short){0xf1f1,0xf1f1,0xf1f1,0xf1f1,0xf1f1,0xf1f1,0xf1f1,0xf1f1});
+ *u16++ = ((vector unsigned short){0xf2f2,0xf2f2,0xf2f2,0xf2f2,0xf2f2,0xf2f2,0xf2f2,0xf2f2});
+ *u16++ = ((vector unsigned short){0xf3f3,0xf3f3,0xf3f3,0xf3f3,0xf3f3,0xf3f3,0xf3f3,0xf3f3});
+ *u16++ = ((vector unsigned short){0xf4f4,0xf4f4,0xf4f4,0xf4f4,0xf4f4,0xf4f4,0xf4f4,0xf4f4});
+ *u16++ = ((vector unsigned short){0xf5f5,0xf5f5,0xf5f5,0xf5f5,0xf5f5,0xf5f5,0xf5f5,0xf5f5});
+ *u16++ = ((vector unsigned short){0xf6f6,0xf6f6,0xf6f6,0xf6f6,0xf6f6,0xf6f6,0xf6f6,0xf6f6});
+ *u16++ = ((vector unsigned short){0xf7f7,0xf7f7,0xf7f7,0xf7f7,0xf7f7,0xf7f7,0xf7f7,0xf7f7});
+ *u16++ = ((vector unsigned short){0xf8f8,0xf8f8,0xf8f8,0xf8f8,0xf8f8,0xf8f8,0xf8f8,0xf8f8});
+ *u16++ = ((vector unsigned short){0xf9f9,0xf9f9,0xf9f9,0xf9f9,0xf9f9,0xf9f9,0xf9f9,0xf9f9});
+ *u16++ = ((vector unsigned short){0xfafa,0xfafa,0xfafa,0xfafa,0xfafa,0xfafa,0xfafa,0xfafa});
+ *u16++ = ((vector unsigned short){0xfbfb,0xfbfb,0xfbfb,0xfbfb,0xfbfb,0xfbfb,0xfbfb,0xfbfb});
+ *u16++ = ((vector unsigned short){0xfcfc,0xfcfc,0xfcfc,0xfcfc,0xfcfc,0xfcfc,0xfcfc,0xfcfc});
+ *u16++ = ((vector unsigned short){0xfdfd,0xfdfd,0xfdfd,0xfdfd,0xfdfd,0xfdfd,0xfdfd,0xfdfd});
+ *u16++ = ((vector unsigned short){0xfefe,0xfefe,0xfefe,0xfefe,0xfefe,0xfefe,0xfefe,0xfefe});
+ *u16++ = ((vector unsigned short){0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff});
+}
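+/* fu16f: halfword splats with matching bytes, 0x0000 through 0x0f0f.  */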
+void fu16f(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000});
+ *u16++ = ((vector unsigned short){0x0101,0x0101,0x0101,0x0101,0x0101,0x0101,0x0101,0x0101});
+ *u16++ = ((vector unsigned short){0x0202,0x0202,0x0202,0x0202,0x0202,0x0202,0x0202,0x0202});
+ *u16++ = ((vector unsigned short){0x0303,0x0303,0x0303,0x0303,0x0303,0x0303,0x0303,0x0303});
+ *u16++ = ((vector unsigned short){0x0404,0x0404,0x0404,0x0404,0x0404,0x0404,0x0404,0x0404});
+ *u16++ = ((vector unsigned short){0x0505,0x0505,0x0505,0x0505,0x0505,0x0505,0x0505,0x0505});
+ *u16++ = ((vector unsigned short){0x0606,0x0606,0x0606,0x0606,0x0606,0x0606,0x0606,0x0606});
+ *u16++ = ((vector unsigned short){0x0707,0x0707,0x0707,0x0707,0x0707,0x0707,0x0707,0x0707});
+ *u16++ = ((vector unsigned short){0x0808,0x0808,0x0808,0x0808,0x0808,0x0808,0x0808,0x0808});
+ *u16++ = ((vector unsigned short){0x0909,0x0909,0x0909,0x0909,0x0909,0x0909,0x0909,0x0909});
+ *u16++ = ((vector unsigned short){0x0a0a,0x0a0a,0x0a0a,0x0a0a,0x0a0a,0x0a0a,0x0a0a,0x0a0a});
+ *u16++ = ((vector unsigned short){0x0b0b,0x0b0b,0x0b0b,0x0b0b,0x0b0b,0x0b0b,0x0b0b,0x0b0b});
+ *u16++ = ((vector unsigned short){0x0c0c,0x0c0c,0x0c0c,0x0c0c,0x0c0c,0x0c0c,0x0c0c,0x0c0c});
+ *u16++ = ((vector unsigned short){0x0d0d,0x0d0d,0x0d0d,0x0d0d,0x0d0d,0x0d0d,0x0d0d,0x0d0d});
+ *u16++ = ((vector unsigned short){0x0e0e,0x0e0e,0x0e0e,0x0e0e,0x0e0e,0x0e0e,0x0e0e,0x0e0e});
+ *u16++ = ((vector unsigned short){0x0f0f,0x0f0f,0x0f0f,0x0f0f,0x0f0f,0x0f0f,0x0f0f,0x0f0f});
+}
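+/* fu16g: the 0..15 splat ladder of fu16d, written in decimal.  */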
+void fu16g(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){0,0,0,0,0,0,0,0});
+ *u16++ = ((vector unsigned short){1,1,1,1,1,1,1,1});
+ *u16++ = ((vector unsigned short){2,2,2,2,2,2,2,2});
+ *u16++ = ((vector unsigned short){3,3,3,3,3,3,3,3});
+ *u16++ = ((vector unsigned short){4,4,4,4,4,4,4,4});
+ *u16++ = ((vector unsigned short){5,5,5,5,5,5,5,5});
+ *u16++ = ((vector unsigned short){6,6,6,6,6,6,6,6});
+ *u16++ = ((vector unsigned short){7,7,7,7,7,7,7,7});
+ *u16++ = ((vector unsigned short){8,8,8,8,8,8,8,8});
+ *u16++ = ((vector unsigned short){9,9,9,9,9,9,9,9});
+ *u16++ = ((vector unsigned short){10,10,10,10,10,10,10,10});
+ *u16++ = ((vector unsigned short){11,11,11,11,11,11,11,11});
+ *u16++ = ((vector unsigned short){12,12,12,12,12,12,12,12});
+ *u16++ = ((vector unsigned short){13,13,13,13,13,13,13,13});
+ *u16++ = ((vector unsigned short){14,14,14,14,14,14,14,14});
+ *u16++ = ((vector unsigned short){15,15,15,15,15,15,15,15});
+}
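+/* fu16h: decimal multiples of 257, i.e. the 0x0101..0x0f0f splats of fu16f
+   without the zero entry (15 stores).  */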
+void fu16h(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){257,257,257,257,257,257,257,257});
+ *u16++ = ((vector unsigned short){514,514,514,514,514,514,514,514});
+ *u16++ = ((vector unsigned short){771,771,771,771,771,771,771,771});
+ *u16++ = ((vector unsigned short){1028,1028,1028,1028,1028,1028,1028,1028});
+ *u16++ = ((vector unsigned short){1285,1285,1285,1285,1285,1285,1285,1285});
+ *u16++ = ((vector unsigned short){1542,1542,1542,1542,1542,1542,1542,1542});
+ *u16++ = ((vector unsigned short){1799,1799,1799,1799,1799,1799,1799,1799});
+ *u16++ = ((vector unsigned short){2056,2056,2056,2056,2056,2056,2056,2056});
+ *u16++ = ((vector unsigned short){2313,2313,2313,2313,2313,2313,2313,2313});
+ *u16++ = ((vector unsigned short){2570,2570,2570,2570,2570,2570,2570,2570});
+ *u16++ = ((vector unsigned short){2827,2827,2827,2827,2827,2827,2827,2827});
+ *u16++ = ((vector unsigned short){3084,3084,3084,3084,3084,3084,3084,3084});
+ *u16++ = ((vector unsigned short){3341,3341,3341,3341,3341,3341,3341,3341});
+ *u16++ = ((vector unsigned short){3598,3598,3598,3598,3598,3598,3598,3598});
+ *u16++ = ((vector unsigned short){3855,3855,3855,3855,3855,3855,3855,3855});
+}
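+/* fu16i: decimal 61680..65278 in steps of 257, i.e. splats 0xf0f0..0xfefe
+   (15 stores).  */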
+void fu16i(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){61680,61680,61680,61680,61680,61680,61680,61680});
+ *u16++ = ((vector unsigned short){61937,61937,61937,61937,61937,61937,61937,61937});
+ *u16++ = ((vector unsigned short){62194,62194,62194,62194,62194,62194,62194,62194});
+ *u16++ = ((vector unsigned short){62451,62451,62451,62451,62451,62451,62451,62451});
+ *u16++ = ((vector unsigned short){62708,62708,62708,62708,62708,62708,62708,62708});
+ *u16++ = ((vector unsigned short){62965,62965,62965,62965,62965,62965,62965,62965});
+ *u16++ = ((vector unsigned short){63222,63222,63222,63222,63222,63222,63222,63222});
+ *u16++ = ((vector unsigned short){63479,63479,63479,63479,63479,63479,63479,63479});
+ *u16++ = ((vector unsigned short){63736,63736,63736,63736,63736,63736,63736,63736});
+ *u16++ = ((vector unsigned short){63993,63993,63993,63993,63993,63993,63993,63993});
+ *u16++ = ((vector unsigned short){64250,64250,64250,64250,64250,64250,64250,64250});
+ *u16++ = ((vector unsigned short){64507,64507,64507,64507,64507,64507,64507,64507});
+ *u16++ = ((vector unsigned short){64764,64764,64764,64764,64764,64764,64764,64764});
+ *u16++ = ((vector unsigned short){65021,65021,65021,65021,65021,65021,65021,65021});
+ *u16++ = ((vector unsigned short){65278,65278,65278,65278,65278,65278,65278,65278});
+}
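+/* fu16j: decimal 65520..65535, the 0xfff0..0xffff splats of fu16c.  */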
+void fu16j(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){65520,65520,65520,65520,65520,65520,65520,65520});
+ *u16++ = ((vector unsigned short){65521,65521,65521,65521,65521,65521,65521,65521});
+ *u16++ = ((vector unsigned short){65522,65522,65522,65522,65522,65522,65522,65522});
+ *u16++ = ((vector unsigned short){65523,65523,65523,65523,65523,65523,65523,65523});
+ *u16++ = ((vector unsigned short){65524,65524,65524,65524,65524,65524,65524,65524});
+ *u16++ = ((vector unsigned short){65525,65525,65525,65525,65525,65525,65525,65525});
+ *u16++ = ((vector unsigned short){65526,65526,65526,65526,65526,65526,65526,65526});
+ *u16++ = ((vector unsigned short){65527,65527,65527,65527,65527,65527,65527,65527});
+ *u16++ = ((vector unsigned short){65528,65528,65528,65528,65528,65528,65528,65528});
+ *u16++ = ((vector unsigned short){65529,65529,65529,65529,65529,65529,65529,65529});
+ *u16++ = ((vector unsigned short){65530,65530,65530,65530,65530,65530,65530,65530});
+ *u16++ = ((vector unsigned short){65531,65531,65531,65531,65531,65531,65531,65531});
+ *u16++ = ((vector unsigned short){65532,65532,65532,65532,65532,65532,65532,65532});
+ *u16++ = ((vector unsigned short){65533,65533,65533,65533,65533,65533,65533,65533});
+ *u16++ = ((vector unsigned short){65534,65534,65534,65534,65534,65534,65534,65534});
+ *u16++ = ((vector unsigned short){65535,65535,65535,65535,65535,65535,65535,65535});
+}
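+/* fu16k: negative decimal constants in an unsigned vector; -3856..-258
+   truncate to the 0xf0f0..0xfefe splats (15 stores).  */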
+void fu16k(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){-3856,-3856,-3856,-3856,-3856,-3856,-3856,-3856});
+ *u16++ = ((vector unsigned short){-3599,-3599,-3599,-3599,-3599,-3599,-3599,-3599});
+ *u16++ = ((vector unsigned short){-3342,-3342,-3342,-3342,-3342,-3342,-3342,-3342});
+ *u16++ = ((vector unsigned short){-3085,-3085,-3085,-3085,-3085,-3085,-3085,-3085});
+ *u16++ = ((vector unsigned short){-2828,-2828,-2828,-2828,-2828,-2828,-2828,-2828});
+ *u16++ = ((vector unsigned short){-2571,-2571,-2571,-2571,-2571,-2571,-2571,-2571});
+ *u16++ = ((vector unsigned short){-2314,-2314,-2314,-2314,-2314,-2314,-2314,-2314});
+ *u16++ = ((vector unsigned short){-2057,-2057,-2057,-2057,-2057,-2057,-2057,-2057});
+ *u16++ = ((vector unsigned short){-1800,-1800,-1800,-1800,-1800,-1800,-1800,-1800});
+ *u16++ = ((vector unsigned short){-1543,-1543,-1543,-1543,-1543,-1543,-1543,-1543});
+ *u16++ = ((vector unsigned short){-1286,-1286,-1286,-1286,-1286,-1286,-1286,-1286});
+ *u16++ = ((vector unsigned short){-1029,-1029,-1029,-1029,-1029,-1029,-1029,-1029});
+ *u16++ = ((vector unsigned short){-772,-772,-772,-772,-772,-772,-772,-772});
+ *u16++ = ((vector unsigned short){-515,-515,-515,-515,-515,-515,-515,-515});
+ *u16++ = ((vector unsigned short){-258,-258,-258,-258,-258,-258,-258,-258});
+}
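+/* fu16l: -16..-1 truncate to the 0xfff0..0xffff splats.  */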
+void fu16l(vector unsigned short *u16)
+{
+ *u16++ = ((vector unsigned short){-16,-16,-16,-16,-16,-16,-16,-16});
+ *u16++ = ((vector unsigned short){-15,-15,-15,-15,-15,-15,-15,-15});
+ *u16++ = ((vector unsigned short){-14,-14,-14,-14,-14,-14,-14,-14});
+ *u16++ = ((vector unsigned short){-13,-13,-13,-13,-13,-13,-13,-13});
+ *u16++ = ((vector unsigned short){-12,-12,-12,-12,-12,-12,-12,-12});
+ *u16++ = ((vector unsigned short){-11,-11,-11,-11,-11,-11,-11,-11});
+ *u16++ = ((vector unsigned short){-10,-10,-10,-10,-10,-10,-10,-10});
+ *u16++ = ((vector unsigned short){-9,-9,-9,-9,-9,-9,-9,-9});
+ *u16++ = ((vector unsigned short){-8,-8,-8,-8,-8,-8,-8,-8});
+ *u16++ = ((vector unsigned short){-7,-7,-7,-7,-7,-7,-7,-7});
+ *u16++ = ((vector unsigned short){-6,-6,-6,-6,-6,-6,-6,-6});
+ *u16++ = ((vector unsigned short){-5,-5,-5,-5,-5,-5,-5,-5});
+ *u16++ = ((vector unsigned short){-4,-4,-4,-4,-4,-4,-4,-4});
+ *u16++ = ((vector unsigned short){-3,-3,-3,-3,-3,-3,-3,-3});
+ *u16++ = ((vector unsigned short){-2,-2,-2,-2,-2,-2,-2,-2});
+ *u16++ = ((vector unsigned short){-1,-1,-1,-1,-1,-1,-1,-1});
+}
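+/* The same ladders repeat below for vector unsigned char, sixteen byte
+   elements per vector.  */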
+void fu8a(vector unsigned char *u8)
+{
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xf0});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xf1});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xf2,0xff,0xff,0xff,0xf2,0xff,0xff,0xff,0xf2,0xff,0xff,0xff,0xf2});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xf3,0xff,0xff,0xff,0xf3,0xff,0xff,0xff,0xf3,0xff,0xff,0xff,0xf3});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xf4,0xff,0xff,0xff,0xf4,0xff,0xff,0xff,0xf4,0xff,0xff,0xff,0xf4});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xf5,0xff,0xff,0xff,0xf5,0xff,0xff,0xff,0xf5,0xff,0xff,0xff,0xf5});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xf6,0xff,0xff,0xff,0xf6,0xff,0xff,0xff,0xf6,0xff,0xff,0xff,0xf6});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xf7,0xff,0xff,0xff,0xf7,0xff,0xff,0xff,0xf7,0xff,0xff,0xff,0xf7});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xf8,0xff,0xff,0xff,0xf8,0xff,0xff,0xff,0xf8,0xff,0xff,0xff,0xf8});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xf9,0xff,0xff,0xff,0xf9,0xff,0xff,0xff,0xf9,0xff,0xff,0xff,0xf9});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xfa,0xff,0xff,0xff,0xfa,0xff,0xff,0xff,0xfa,0xff,0xff,0xff,0xfa});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xfb,0xff,0xff,0xff,0xfb,0xff,0xff,0xff,0xfb,0xff,0xff,0xff,0xfb});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xfc,0xff,0xff,0xff,0xfc,0xff,0xff,0xff,0xfc,0xff,0xff,0xff,0xfc});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xfd});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfe});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff});
+}
+void fu8b(vector unsigned char *u8)
+{
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x02});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x03});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x05});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x06});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x07});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x08});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x09,0x00,0x00,0x00,0x09,0x00,0x00,0x00,0x09,0x00,0x00,0x00,0x09});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x0a});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x0b});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x0c,0x00,0x00,0x00,0x0c,0x00,0x00,0x00,0x0c,0x00,0x00,0x00,0x0c});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x0d,0x00,0x00,0x00,0x0d,0x00,0x00,0x00,0x0d,0x00,0x00,0x00,0x0d});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x0e});
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x0f});
+}
+void fu8c(vector unsigned char *u8)
+{
+ *u8++ = ((vector unsigned char){0xff,0xf0,0xff,0xf0,0xff,0xf0,0xff,0xf0,0xff,0xf0,0xff,0xf0,0xff,0xf0,0xff,0xf0});
+ *u8++ = ((vector unsigned char){0xff,0xf1,0xff,0xf1,0xff,0xf1,0xff,0xf1,0xff,0xf1,0xff,0xf1,0xff,0xf1,0xff,0xf1});
+ *u8++ = ((vector unsigned char){0xff,0xf2,0xff,0xf2,0xff,0xf2,0xff,0xf2,0xff,0xf2,0xff,0xf2,0xff,0xf2,0xff,0xf2});
+ *u8++ = ((vector unsigned char){0xff,0xf3,0xff,0xf3,0xff,0xf3,0xff,0xf3,0xff,0xf3,0xff,0xf3,0xff,0xf3,0xff,0xf3});
+ *u8++ = ((vector unsigned char){0xff,0xf4,0xff,0xf4,0xff,0xf4,0xff,0xf4,0xff,0xf4,0xff,0xf4,0xff,0xf4,0xff,0xf4});
+ *u8++ = ((vector unsigned char){0xff,0xf5,0xff,0xf5,0xff,0xf5,0xff,0xf5,0xff,0xf5,0xff,0xf5,0xff,0xf5,0xff,0xf5});
+ *u8++ = ((vector unsigned char){0xff,0xf6,0xff,0xf6,0xff,0xf6,0xff,0xf6,0xff,0xf6,0xff,0xf6,0xff,0xf6,0xff,0xf6});
+ *u8++ = ((vector unsigned char){0xff,0xf7,0xff,0xf7,0xff,0xf7,0xff,0xf7,0xff,0xf7,0xff,0xf7,0xff,0xf7,0xff,0xf7});
+ *u8++ = ((vector unsigned char){0xff,0xf8,0xff,0xf8,0xff,0xf8,0xff,0xf8,0xff,0xf8,0xff,0xf8,0xff,0xf8,0xff,0xf8});
+ *u8++ = ((vector unsigned char){0xff,0xf9,0xff,0xf9,0xff,0xf9,0xff,0xf9,0xff,0xf9,0xff,0xf9,0xff,0xf9,0xff,0xf9});
+ *u8++ = ((vector unsigned char){0xff,0xfa,0xff,0xfa,0xff,0xfa,0xff,0xfa,0xff,0xfa,0xff,0xfa,0xff,0xfa,0xff,0xfa});
+ *u8++ = ((vector unsigned char){0xff,0xfb,0xff,0xfb,0xff,0xfb,0xff,0xfb,0xff,0xfb,0xff,0xfb,0xff,0xfb,0xff,0xfb});
+ *u8++ = ((vector unsigned char){0xff,0xfc,0xff,0xfc,0xff,0xfc,0xff,0xfc,0xff,0xfc,0xff,0xfc,0xff,0xfc,0xff,0xfc});
+ *u8++ = ((vector unsigned char){0xff,0xfd,0xff,0xfd,0xff,0xfd,0xff,0xfd,0xff,0xfd,0xff,0xfd,0xff,0xfd,0xff,0xfd});
+ *u8++ = ((vector unsigned char){0xff,0xfe,0xff,0xfe,0xff,0xfe,0xff,0xfe,0xff,0xfe,0xff,0xfe,0xff,0xfe,0xff,0xfe});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff});
+}
+void fu8d(vector unsigned char *u8)
+{
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00});
+ *u8++ = ((vector unsigned char){0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01});
+ *u8++ = ((vector unsigned char){0x00,0x02,0x00,0x02,0x00,0x02,0x00,0x02,0x00,0x02,0x00,0x02,0x00,0x02,0x00,0x02});
+ *u8++ = ((vector unsigned char){0x00,0x03,0x00,0x03,0x00,0x03,0x00,0x03,0x00,0x03,0x00,0x03,0x00,0x03,0x00,0x03});
+ *u8++ = ((vector unsigned char){0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04});
+ *u8++ = ((vector unsigned char){0x00,0x05,0x00,0x05,0x00,0x05,0x00,0x05,0x00,0x05,0x00,0x05,0x00,0x05,0x00,0x05});
+ *u8++ = ((vector unsigned char){0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06});
+ *u8++ = ((vector unsigned char){0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x07});
+ *u8++ = ((vector unsigned char){0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x08});
+ *u8++ = ((vector unsigned char){0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x09});
+ *u8++ = ((vector unsigned char){0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a});
+ *u8++ = ((vector unsigned char){0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b});
+ *u8++ = ((vector unsigned char){0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x0c});
+ *u8++ = ((vector unsigned char){0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d});
+ *u8++ = ((vector unsigned char){0x00,0x0e,0x00,0x0e,0x00,0x0e,0x00,0x0e,0x00,0x0e,0x00,0x0e,0x00,0x0e,0x00,0x0e});
+ *u8++ = ((vector unsigned char){0x00,0x0f,0x00,0x0f,0x00,0x0f,0x00,0x0f,0x00,0x0f,0x00,0x0f,0x00,0x0f,0x00,0x0f});
+}
+void fu8e(vector unsigned char *u8)
+{
+ *u8++ = ((vector unsigned char){0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00});
+ *u8++ = ((vector unsigned char){0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01});
+ *u8++ = ((vector unsigned char){0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02});
+ *u8++ = ((vector unsigned char){0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03});
+ *u8++ = ((vector unsigned char){0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04});
+ *u8++ = ((vector unsigned char){0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05});
+ *u8++ = ((vector unsigned char){0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06});
+ *u8++ = ((vector unsigned char){0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07});
+ *u8++ = ((vector unsigned char){0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08});
+ *u8++ = ((vector unsigned char){0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09});
+ *u8++ = ((vector unsigned char){0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a});
+ *u8++ = ((vector unsigned char){0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b});
+ *u8++ = ((vector unsigned char){0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c});
+ *u8++ = ((vector unsigned char){0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d});
+ *u8++ = ((vector unsigned char){0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e});
+ *u8++ = ((vector unsigned char){0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f});
+}
+void fu8f(vector unsigned char *u8)
+{
+ *u8++ = ((vector unsigned char){0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0});
+ *u8++ = ((vector unsigned char){0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1});
+ *u8++ = ((vector unsigned char){0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2});
+ *u8++ = ((vector unsigned char){0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3});
+ *u8++ = ((vector unsigned char){0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4});
+ *u8++ = ((vector unsigned char){0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5});
+ *u8++ = ((vector unsigned char){0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6});
+ *u8++ = ((vector unsigned char){0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7});
+ *u8++ = ((vector unsigned char){0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8});
+ *u8++ = ((vector unsigned char){0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9});
+ *u8++ = ((vector unsigned char){0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa});
+ *u8++ = ((vector unsigned char){0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb});
+ *u8++ = ((vector unsigned char){0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc});
+ *u8++ = ((vector unsigned char){0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd});
+ *u8++ = ((vector unsigned char){0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe});
+ *u8++ = ((vector unsigned char){0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff});
+}
+void fu8g(vector unsigned char *u8)
+{
+ *u8++ = ((vector unsigned char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0});
+ *u8++ = ((vector unsigned char){1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
+ *u8++ = ((vector unsigned char){2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2});
+ *u8++ = ((vector unsigned char){3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3});
+ *u8++ = ((vector unsigned char){4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4});
+ *u8++ = ((vector unsigned char){5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5});
+ *u8++ = ((vector unsigned char){6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6});
+ *u8++ = ((vector unsigned char){7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7});
+ *u8++ = ((vector unsigned char){8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8});
+ *u8++ = ((vector unsigned char){9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9});
+ *u8++ = ((vector unsigned char){10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10});
+ *u8++ = ((vector unsigned char){11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11});
+ *u8++ = ((vector unsigned char){12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12});
+ *u8++ = ((vector unsigned char){13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13});
+ *u8++ = ((vector unsigned char){14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14});
+ *u8++ = ((vector unsigned char){15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15});
+}
+void fu8h(vector unsigned char *u8)
+{
+ *u8++ = ((vector unsigned char){240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240});
+ *u8++ = ((vector unsigned char){241,241,241,241,241,241,241,241,241,241,241,241,241,241,241,241});
+ *u8++ = ((vector unsigned char){242,242,242,242,242,242,242,242,242,242,242,242,242,242,242,242});
+ *u8++ = ((vector unsigned char){243,243,243,243,243,243,243,243,243,243,243,243,243,243,243,243});
+ *u8++ = ((vector unsigned char){244,244,244,244,244,244,244,244,244,244,244,244,244,244,244,244});
+ *u8++ = ((vector unsigned char){245,245,245,245,245,245,245,245,245,245,245,245,245,245,245,245});
+ *u8++ = ((vector unsigned char){246,246,246,246,246,246,246,246,246,246,246,246,246,246,246,246});
+ *u8++ = ((vector unsigned char){247,247,247,247,247,247,247,247,247,247,247,247,247,247,247,247});
+ *u8++ = ((vector unsigned char){248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248});
+ *u8++ = ((vector unsigned char){249,249,249,249,249,249,249,249,249,249,249,249,249,249,249,249});
+ *u8++ = ((vector unsigned char){250,250,250,250,250,250,250,250,250,250,250,250,250,250,250,250});
+ *u8++ = ((vector unsigned char){251,251,251,251,251,251,251,251,251,251,251,251,251,251,251,251});
+ *u8++ = ((vector unsigned char){252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252});
+ *u8++ = ((vector unsigned char){253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253});
+ *u8++ = ((vector unsigned char){254,254,254,254,254,254,254,254,254,254,254,254,254,254,254,254});
+ *u8++ = ((vector unsigned char){255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255});
+}
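+/* fu8i: counts down from -1 to -16; after truncation these are the byte
+   splats 0xff down to 0xf0.  */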
+void fu8i(vector unsigned char *u8)
+{
+ *u8++ = ((vector unsigned char){-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1});
+ *u8++ = ((vector unsigned char){-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2});
+ *u8++ = ((vector unsigned char){-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3});
+ *u8++ = ((vector unsigned char){-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4});
+ *u8++ = ((vector unsigned char){-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5});
+ *u8++ = ((vector unsigned char){-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6});
+ *u8++ = ((vector unsigned char){-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7});
+ *u8++ = ((vector unsigned char){-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8});
+ *u8++ = ((vector unsigned char){-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9});
+ *u8++ = ((vector unsigned char){-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10});
+ *u8++ = ((vector unsigned char){-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11});
+ *u8++ = ((vector unsigned char){-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12});
+ *u8++ = ((vector unsigned char){-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13});
+ *u8++ = ((vector unsigned char){-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14});
+ *u8++ = ((vector unsigned char){-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15});
+ *u8++ = ((vector unsigned char){-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16});
+}
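+/* The vector signed int ladders follow, four 32-bit elements per vector.  */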
+void fs32a(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){0,0,0,0});
+ *s32++ = ((vector signed int){1,1,1,1});
+ *s32++ = ((vector signed int){2,2,2,2});
+ *s32++ = ((vector signed int){3,3,3,3});
+ *s32++ = ((vector signed int){4,4,4,4});
+ *s32++ = ((vector signed int){5,5,5,5});
+ *s32++ = ((vector signed int){6,6,6,6});
+ *s32++ = ((vector signed int){7,7,7,7});
+ *s32++ = ((vector signed int){8,8,8,8});
+ *s32++ = ((vector signed int){9,9,9,9});
+ *s32++ = ((vector signed int){10,10,10,10});
+ *s32++ = ((vector signed int){11,11,11,11});
+ *s32++ = ((vector signed int){12,12,12,12});
+ *s32++ = ((vector signed int){13,13,13,13});
+ *s32++ = ((vector signed int){14,14,14,14});
+ *s32++ = ((vector signed int){15,15,15,15});
+}
+void fs32b(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){65537,65537,65537,65537});
+ *s32++ = ((vector signed int){131074,131074,131074,131074});
+ *s32++ = ((vector signed int){196611,196611,196611,196611});
+ *s32++ = ((vector signed int){262148,262148,262148,262148});
+ *s32++ = ((vector signed int){327685,327685,327685,327685});
+ *s32++ = ((vector signed int){393222,393222,393222,393222});
+ *s32++ = ((vector signed int){458759,458759,458759,458759});
+ *s32++ = ((vector signed int){524296,524296,524296,524296});
+ *s32++ = ((vector signed int){589833,589833,589833,589833});
+ *s32++ = ((vector signed int){655370,655370,655370,655370});
+ *s32++ = ((vector signed int){720907,720907,720907,720907});
+ *s32++ = ((vector signed int){786444,786444,786444,786444});
+ *s32++ = ((vector signed int){851981,851981,851981,851981});
+ *s32++ = ((vector signed int){917518,917518,917518,917518});
+ *s32++ = ((vector signed int){983055,983055,983055,983055});
+}
+void fs32c(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){16843009,16843009,16843009,16843009});
+ *s32++ = ((vector signed int){33686018,33686018,33686018,33686018});
+ *s32++ = ((vector signed int){50529027,50529027,50529027,50529027});
+ *s32++ = ((vector signed int){67372036,67372036,67372036,67372036});
+ *s32++ = ((vector signed int){84215045,84215045,84215045,84215045});
+ *s32++ = ((vector signed int){101058054,101058054,101058054,101058054});
+ *s32++ = ((vector signed int){117901063,117901063,117901063,117901063});
+ *s32++ = ((vector signed int){134744072,134744072,134744072,134744072});
+ *s32++ = ((vector signed int){151587081,151587081,151587081,151587081});
+ *s32++ = ((vector signed int){168430090,168430090,168430090,168430090});
+ *s32++ = ((vector signed int){185273099,185273099,185273099,185273099});
+ *s32++ = ((vector signed int){202116108,202116108,202116108,202116108});
+ *s32++ = ((vector signed int){218959117,218959117,218959117,218959117});
+ *s32++ = ((vector signed int){235802126,235802126,235802126,235802126});
+ *s32++ = ((vector signed int){252645135,252645135,252645135,252645135});
+}
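+/* fs32d: decimal constants above INT_MAX, carrying the 0xf0f0f0f0..0xfefefefe
+   bit patterns; presumably exercising the implementation-defined conversion
+   of out-of-range values to signed int (15 stores).  */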
+void fs32d(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){4042322160,4042322160,4042322160,4042322160});
+ *s32++ = ((vector signed int){4059165169,4059165169,4059165169,4059165169});
+ *s32++ = ((vector signed int){4076008178,4076008178,4076008178,4076008178});
+ *s32++ = ((vector signed int){4092851187,4092851187,4092851187,4092851187});
+ *s32++ = ((vector signed int){4109694196,4109694196,4109694196,4109694196});
+ *s32++ = ((vector signed int){4126537205,4126537205,4126537205,4126537205});
+ *s32++ = ((vector signed int){4143380214,4143380214,4143380214,4143380214});
+ *s32++ = ((vector signed int){4160223223,4160223223,4160223223,4160223223});
+ *s32++ = ((vector signed int){4177066232,4177066232,4177066232,4177066232});
+ *s32++ = ((vector signed int){4193909241,4193909241,4193909241,4193909241});
+ *s32++ = ((vector signed int){4210752250,4210752250,4210752250,4210752250});
+ *s32++ = ((vector signed int){4227595259,4227595259,4227595259,4227595259});
+ *s32++ = ((vector signed int){4244438268,4244438268,4244438268,4244438268});
+ *s32++ = ((vector signed int){4261281277,4261281277,4261281277,4261281277});
+ *s32++ = ((vector signed int){4278124286,4278124286,4278124286,4278124286});
+}
+void fs32e(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){4293984240,4293984240,4293984240,4293984240});
+ *s32++ = ((vector signed int){4294049777,4294049777,4294049777,4294049777});
+ *s32++ = ((vector signed int){4294115314,4294115314,4294115314,4294115314});
+ *s32++ = ((vector signed int){4294180851,4294180851,4294180851,4294180851});
+ *s32++ = ((vector signed int){4294246388,4294246388,4294246388,4294246388});
+ *s32++ = ((vector signed int){4294311925,4294311925,4294311925,4294311925});
+ *s32++ = ((vector signed int){4294377462,4294377462,4294377462,4294377462});
+ *s32++ = ((vector signed int){4294442999,4294442999,4294442999,4294442999});
+ *s32++ = ((vector signed int){4294508536,4294508536,4294508536,4294508536});
+ *s32++ = ((vector signed int){4294574073,4294574073,4294574073,4294574073});
+ *s32++ = ((vector signed int){4294639610,4294639610,4294639610,4294639610});
+ *s32++ = ((vector signed int){4294705147,4294705147,4294705147,4294705147});
+ *s32++ = ((vector signed int){4294770684,4294770684,4294770684,4294770684});
+ *s32++ = ((vector signed int){4294836221,4294836221,4294836221,4294836221});
+ *s32++ = ((vector signed int){4294901758,4294901758,4294901758,4294901758});
+}
+void fs32f(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){4294967280,4294967280,4294967280,4294967280});
+ *s32++ = ((vector signed int){4294967281,4294967281,4294967281,4294967281});
+ *s32++ = ((vector signed int){4294967282,4294967282,4294967282,4294967282});
+ *s32++ = ((vector signed int){4294967283,4294967283,4294967283,4294967283});
+ *s32++ = ((vector signed int){4294967284,4294967284,4294967284,4294967284});
+ *s32++ = ((vector signed int){4294967285,4294967285,4294967285,4294967285});
+ *s32++ = ((vector signed int){4294967286,4294967286,4294967286,4294967286});
+ *s32++ = ((vector signed int){4294967287,4294967287,4294967287,4294967287});
+ *s32++ = ((vector signed int){4294967288,4294967288,4294967288,4294967288});
+ *s32++ = ((vector signed int){4294967289,4294967289,4294967289,4294967289});
+ *s32++ = ((vector signed int){4294967290,4294967290,4294967290,4294967290});
+ *s32++ = ((vector signed int){4294967291,4294967291,4294967291,4294967291});
+ *s32++ = ((vector signed int){4294967292,4294967292,4294967292,4294967292});
+ *s32++ = ((vector signed int){4294967293,4294967293,4294967293,4294967293});
+ *s32++ = ((vector signed int){4294967294,4294967294,4294967294,4294967294});
+ *s32++ = ((vector signed int){4294967295,4294967295,4294967295,4294967295});
+}
+void fs32g(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){-252645136,-252645136,-252645136,-252645136});
+ *s32++ = ((vector signed int){-235802127,-235802127,-235802127,-235802127});
+ *s32++ = ((vector signed int){-218959118,-218959118,-218959118,-218959118});
+ *s32++ = ((vector signed int){-202116109,-202116109,-202116109,-202116109});
+ *s32++ = ((vector signed int){-185273100,-185273100,-185273100,-185273100});
+ *s32++ = ((vector signed int){-168430091,-168430091,-168430091,-168430091});
+ *s32++ = ((vector signed int){-151587082,-151587082,-151587082,-151587082});
+ *s32++ = ((vector signed int){-134744073,-134744073,-134744073,-134744073});
+ *s32++ = ((vector signed int){-117901064,-117901064,-117901064,-117901064});
+ *s32++ = ((vector signed int){-101058055,-101058055,-101058055,-101058055});
+ *s32++ = ((vector signed int){-84215046,-84215046,-84215046,-84215046});
+ *s32++ = ((vector signed int){-67372037,-67372037,-67372037,-67372037});
+ *s32++ = ((vector signed int){-50529028,-50529028,-50529028,-50529028});
+ *s32++ = ((vector signed int){-33686019,-33686019,-33686019,-33686019});
+ *s32++ = ((vector signed int){-16843010,-16843010,-16843010,-16843010});
+}
+void fs32h(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){-983056,-983056,-983056,-983056});
+ *s32++ = ((vector signed int){-917519,-917519,-917519,-917519});
+ *s32++ = ((vector signed int){-851982,-851982,-851982,-851982});
+ *s32++ = ((vector signed int){-786445,-786445,-786445,-786445});
+ *s32++ = ((vector signed int){-720908,-720908,-720908,-720908});
+ *s32++ = ((vector signed int){-655371,-655371,-655371,-655371});
+ *s32++ = ((vector signed int){-589834,-589834,-589834,-589834});
+ *s32++ = ((vector signed int){-524297,-524297,-524297,-524297});
+ *s32++ = ((vector signed int){-458760,-458760,-458760,-458760});
+ *s32++ = ((vector signed int){-393223,-393223,-393223,-393223});
+ *s32++ = ((vector signed int){-327686,-327686,-327686,-327686});
+ *s32++ = ((vector signed int){-262149,-262149,-262149,-262149});
+ *s32++ = ((vector signed int){-196612,-196612,-196612,-196612});
+ *s32++ = ((vector signed int){-131075,-131075,-131075,-131075});
+ *s32++ = ((vector signed int){-65538,-65538,-65538,-65538});
+}
+void fs32i(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){-16,-16,-16,-16});
+ *s32++ = ((vector signed int){-15,-15,-15,-15});
+ *s32++ = ((vector signed int){-14,-14,-14,-14});
+ *s32++ = ((vector signed int){-13,-13,-13,-13});
+ *s32++ = ((vector signed int){-12,-12,-12,-12});
+ *s32++ = ((vector signed int){-11,-11,-11,-11});
+ *s32++ = ((vector signed int){-10,-10,-10,-10});
+ *s32++ = ((vector signed int){-9,-9,-9,-9});
+ *s32++ = ((vector signed int){-8,-8,-8,-8});
+ *s32++ = ((vector signed int){-7,-7,-7,-7});
+ *s32++ = ((vector signed int){-6,-6,-6,-6});
+ *s32++ = ((vector signed int){-5,-5,-5,-5});
+ *s32++ = ((vector signed int){-4,-4,-4,-4});
+ *s32++ = ((vector signed int){-3,-3,-3,-3});
+ *s32++ = ((vector signed int){-2,-2,-2,-2});
+ *s32++ = ((vector signed int){-1,-1,-1,-1});
+}
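+/* fs32j: the same 0xfffffff0..0xffffffff splats as fs32f, written in hex.  */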
+void fs32j(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){0xfffffff0,0xfffffff0,0xfffffff0,0xfffffff0});
+ *s32++ = ((vector signed int){0xfffffff1,0xfffffff1,0xfffffff1,0xfffffff1});
+ *s32++ = ((vector signed int){0xfffffff2,0xfffffff2,0xfffffff2,0xfffffff2});
+ *s32++ = ((vector signed int){0xfffffff3,0xfffffff3,0xfffffff3,0xfffffff3});
+ *s32++ = ((vector signed int){0xfffffff4,0xfffffff4,0xfffffff4,0xfffffff4});
+ *s32++ = ((vector signed int){0xfffffff5,0xfffffff5,0xfffffff5,0xfffffff5});
+ *s32++ = ((vector signed int){0xfffffff6,0xfffffff6,0xfffffff6,0xfffffff6});
+ *s32++ = ((vector signed int){0xfffffff7,0xfffffff7,0xfffffff7,0xfffffff7});
+ *s32++ = ((vector signed int){0xfffffff8,0xfffffff8,0xfffffff8,0xfffffff8});
+ *s32++ = ((vector signed int){0xfffffff9,0xfffffff9,0xfffffff9,0xfffffff9});
+ *s32++ = ((vector signed int){0xfffffffa,0xfffffffa,0xfffffffa,0xfffffffa});
+ *s32++ = ((vector signed int){0xfffffffb,0xfffffffb,0xfffffffb,0xfffffffb});
+ *s32++ = ((vector signed int){0xfffffffc,0xfffffffc,0xfffffffc,0xfffffffc});
+ *s32++ = ((vector signed int){0xfffffffd,0xfffffffd,0xfffffffd,0xfffffffd});
+ *s32++ = ((vector signed int){0xfffffffe,0xfffffffe,0xfffffffe,0xfffffffe});
+ *s32++ = ((vector signed int){0xffffffff,0xffffffff,0xffffffff,0xffffffff});
+}
+void fs32k(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){0x00000000,0x00000000,0x00000000,0x00000000});
+ *s32++ = ((vector signed int){0x00000001,0x00000001,0x00000001,0x00000001});
+ *s32++ = ((vector signed int){0x00000002,0x00000002,0x00000002,0x00000002});
+ *s32++ = ((vector signed int){0x00000003,0x00000003,0x00000003,0x00000003});
+ *s32++ = ((vector signed int){0x00000004,0x00000004,0x00000004,0x00000004});
+ *s32++ = ((vector signed int){0x00000005,0x00000005,0x00000005,0x00000005});
+ *s32++ = ((vector signed int){0x00000006,0x00000006,0x00000006,0x00000006});
+ *s32++ = ((vector signed int){0x00000007,0x00000007,0x00000007,0x00000007});
+ *s32++ = ((vector signed int){0x00000008,0x00000008,0x00000008,0x00000008});
+ *s32++ = ((vector signed int){0x00000009,0x00000009,0x00000009,0x00000009});
+ *s32++ = ((vector signed int){0x0000000a,0x0000000a,0x0000000a,0x0000000a});
+ *s32++ = ((vector signed int){0x0000000b,0x0000000b,0x0000000b,0x0000000b});
+ *s32++ = ((vector signed int){0x0000000c,0x0000000c,0x0000000c,0x0000000c});
+ *s32++ = ((vector signed int){0x0000000d,0x0000000d,0x0000000d,0x0000000d});
+ *s32++ = ((vector signed int){0x0000000e,0x0000000e,0x0000000e,0x0000000e});
+ *s32++ = ((vector signed int){0x0000000f,0x0000000f,0x0000000f,0x0000000f});
+}
+void fs32l(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){0xfff0fff0,0xfff0fff0,0xfff0fff0,0xfff0fff0});
+ *s32++ = ((vector signed int){0xfff1fff1,0xfff1fff1,0xfff1fff1,0xfff1fff1});
+ *s32++ = ((vector signed int){0xfff2fff2,0xfff2fff2,0xfff2fff2,0xfff2fff2});
+ *s32++ = ((vector signed int){0xfff3fff3,0xfff3fff3,0xfff3fff3,0xfff3fff3});
+ *s32++ = ((vector signed int){0xfff4fff4,0xfff4fff4,0xfff4fff4,0xfff4fff4});
+ *s32++ = ((vector signed int){0xfff5fff5,0xfff5fff5,0xfff5fff5,0xfff5fff5});
+ *s32++ = ((vector signed int){0xfff6fff6,0xfff6fff6,0xfff6fff6,0xfff6fff6});
+ *s32++ = ((vector signed int){0xfff7fff7,0xfff7fff7,0xfff7fff7,0xfff7fff7});
+ *s32++ = ((vector signed int){0xfff8fff8,0xfff8fff8,0xfff8fff8,0xfff8fff8});
+ *s32++ = ((vector signed int){0xfff9fff9,0xfff9fff9,0xfff9fff9,0xfff9fff9});
+ *s32++ = ((vector signed int){0xfffafffa,0xfffafffa,0xfffafffa,0xfffafffa});
+ *s32++ = ((vector signed int){0xfffbfffb,0xfffbfffb,0xfffbfffb,0xfffbfffb});
+ *s32++ = ((vector signed int){0xfffcfffc,0xfffcfffc,0xfffcfffc,0xfffcfffc});
+ *s32++ = ((vector signed int){0xfffdfffd,0xfffdfffd,0xfffdfffd,0xfffdfffd});
+ *s32++ = ((vector signed int){0xfffefffe,0xfffefffe,0xfffefffe,0xfffefffe});
+ *s32++ = ((vector signed int){0xffffffff,0xffffffff,0xffffffff,0xffffffff});
+}
+void fs32m(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){0x00000000,0x00000000,0x00000000,0x00000000});
+ *s32++ = ((vector signed int){0x00010001,0x00010001,0x00010001,0x00010001});
+ *s32++ = ((vector signed int){0x00020002,0x00020002,0x00020002,0x00020002});
+ *s32++ = ((vector signed int){0x00030003,0x00030003,0x00030003,0x00030003});
+ *s32++ = ((vector signed int){0x00040004,0x00040004,0x00040004,0x00040004});
+ *s32++ = ((vector signed int){0x00050005,0x00050005,0x00050005,0x00050005});
+ *s32++ = ((vector signed int){0x00060006,0x00060006,0x00060006,0x00060006});
+ *s32++ = ((vector signed int){0x00070007,0x00070007,0x00070007,0x00070007});
+ *s32++ = ((vector signed int){0x00080008,0x00080008,0x00080008,0x00080008});
+ *s32++ = ((vector signed int){0x00090009,0x00090009,0x00090009,0x00090009});
+ *s32++ = ((vector signed int){0x000a000a,0x000a000a,0x000a000a,0x000a000a});
+ *s32++ = ((vector signed int){0x000b000b,0x000b000b,0x000b000b,0x000b000b});
+ *s32++ = ((vector signed int){0x000c000c,0x000c000c,0x000c000c,0x000c000c});
+ *s32++ = ((vector signed int){0x000d000d,0x000d000d,0x000d000d,0x000d000d});
+ *s32++ = ((vector signed int){0x000e000e,0x000e000e,0x000e000e,0x000e000e});
+ *s32++ = ((vector signed int){0x000f000f,0x000f000f,0x000f000f,0x000f000f});
+}
+void fs32n(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){0xf0f0f0f0,0xf0f0f0f0,0xf0f0f0f0,0xf0f0f0f0});
+ *s32++ = ((vector signed int){0xf1f1f1f1,0xf1f1f1f1,0xf1f1f1f1,0xf1f1f1f1});
+ *s32++ = ((vector signed int){0xf2f2f2f2,0xf2f2f2f2,0xf2f2f2f2,0xf2f2f2f2});
+ *s32++ = ((vector signed int){0xf3f3f3f3,0xf3f3f3f3,0xf3f3f3f3,0xf3f3f3f3});
+ *s32++ = ((vector signed int){0xf4f4f4f4,0xf4f4f4f4,0xf4f4f4f4,0xf4f4f4f4});
+ *s32++ = ((vector signed int){0xf5f5f5f5,0xf5f5f5f5,0xf5f5f5f5,0xf5f5f5f5});
+ *s32++ = ((vector signed int){0xf6f6f6f6,0xf6f6f6f6,0xf6f6f6f6,0xf6f6f6f6});
+ *s32++ = ((vector signed int){0xf7f7f7f7,0xf7f7f7f7,0xf7f7f7f7,0xf7f7f7f7});
+ *s32++ = ((vector signed int){0xf8f8f8f8,0xf8f8f8f8,0xf8f8f8f8,0xf8f8f8f8});
+ *s32++ = ((vector signed int){0xf9f9f9f9,0xf9f9f9f9,0xf9f9f9f9,0xf9f9f9f9});
+ *s32++ = ((vector signed int){0xfafafafa,0xfafafafa,0xfafafafa,0xfafafafa});
+ *s32++ = ((vector signed int){0xfbfbfbfb,0xfbfbfbfb,0xfbfbfbfb,0xfbfbfbfb});
+ *s32++ = ((vector signed int){0xfcfcfcfc,0xfcfcfcfc,0xfcfcfcfc,0xfcfcfcfc});
+ *s32++ = ((vector signed int){0xfdfdfdfd,0xfdfdfdfd,0xfdfdfdfd,0xfdfdfdfd});
+ *s32++ = ((vector signed int){0xfefefefe,0xfefefefe,0xfefefefe,0xfefefefe});
+ *s32++ = ((vector signed int){0xffffffff,0xffffffff,0xffffffff,0xffffffff});
+}
+void fs32o(vector signed int *s32)
+{
+ *s32++ = ((vector signed int){0x00000000,0x00000000,0x00000000,0x00000000});
+ *s32++ = ((vector signed int){0x01010101,0x01010101,0x01010101,0x01010101});
+ *s32++ = ((vector signed int){0x02020202,0x02020202,0x02020202,0x02020202});
+ *s32++ = ((vector signed int){0x03030303,0x03030303,0x03030303,0x03030303});
+ *s32++ = ((vector signed int){0x04040404,0x04040404,0x04040404,0x04040404});
+ *s32++ = ((vector signed int){0x05050505,0x05050505,0x05050505,0x05050505});
+ *s32++ = ((vector signed int){0x06060606,0x06060606,0x06060606,0x06060606});
+ *s32++ = ((vector signed int){0x07070707,0x07070707,0x07070707,0x07070707});
+ *s32++ = ((vector signed int){0x08080808,0x08080808,0x08080808,0x08080808});
+ *s32++ = ((vector signed int){0x09090909,0x09090909,0x09090909,0x09090909});
+ *s32++ = ((vector signed int){0x0a0a0a0a,0x0a0a0a0a,0x0a0a0a0a,0x0a0a0a0a});
+ *s32++ = ((vector signed int){0x0b0b0b0b,0x0b0b0b0b,0x0b0b0b0b,0x0b0b0b0b});
+ *s32++ = ((vector signed int){0x0c0c0c0c,0x0c0c0c0c,0x0c0c0c0c,0x0c0c0c0c});
+ *s32++ = ((vector signed int){0x0d0d0d0d,0x0d0d0d0d,0x0d0d0d0d,0x0d0d0d0d});
+ *s32++ = ((vector signed int){0x0e0e0e0e,0x0e0e0e0e,0x0e0e0e0e,0x0e0e0e0e});
+ *s32++ = ((vector signed int){0x0f0f0f0f,0x0f0f0f0f,0x0f0f0f0f,0x0f0f0f0f});
+}
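+/* The vector signed short ladders follow, eight 16-bit elements per vector;
+   the hex and large-decimal entries above 0x7fff presumably rely on the
+   constants wrapping to negative values.  */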
+void fs16a(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){0xffff,0xfff0,0xffff,0xfff0,0xffff,0xfff0,0xffff,0xfff0});
+ *s16++ = ((vector signed short){0xffff,0xfff1,0xffff,0xfff1,0xffff,0xfff1,0xffff,0xfff1});
+ *s16++ = ((vector signed short){0xffff,0xfff2,0xffff,0xfff2,0xffff,0xfff2,0xffff,0xfff2});
+ *s16++ = ((vector signed short){0xffff,0xfff3,0xffff,0xfff3,0xffff,0xfff3,0xffff,0xfff3});
+ *s16++ = ((vector signed short){0xffff,0xfff4,0xffff,0xfff4,0xffff,0xfff4,0xffff,0xfff4});
+ *s16++ = ((vector signed short){0xffff,0xfff5,0xffff,0xfff5,0xffff,0xfff5,0xffff,0xfff5});
+ *s16++ = ((vector signed short){0xffff,0xfff6,0xffff,0xfff6,0xffff,0xfff6,0xffff,0xfff6});
+ *s16++ = ((vector signed short){0xffff,0xfff7,0xffff,0xfff7,0xffff,0xfff7,0xffff,0xfff7});
+ *s16++ = ((vector signed short){0xffff,0xfff8,0xffff,0xfff8,0xffff,0xfff8,0xffff,0xfff8});
+ *s16++ = ((vector signed short){0xffff,0xfff9,0xffff,0xfff9,0xffff,0xfff9,0xffff,0xfff9});
+ *s16++ = ((vector signed short){0xffff,0xfffa,0xffff,0xfffa,0xffff,0xfffa,0xffff,0xfffa});
+ *s16++ = ((vector signed short){0xffff,0xfffb,0xffff,0xfffb,0xffff,0xfffb,0xffff,0xfffb});
+ *s16++ = ((vector signed short){0xffff,0xfffc,0xffff,0xfffc,0xffff,0xfffc,0xffff,0xfffc});
+ *s16++ = ((vector signed short){0xffff,0xfffd,0xffff,0xfffd,0xffff,0xfffd,0xffff,0xfffd});
+ *s16++ = ((vector signed short){0xffff,0xfffe,0xffff,0xfffe,0xffff,0xfffe,0xffff,0xfffe});
+ *s16++ = ((vector signed short){0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff});
+}
+void fs16b(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000});
+ *s16++ = ((vector signed short){0x0000,0x0001,0x0000,0x0001,0x0000,0x0001,0x0000,0x0001});
+ *s16++ = ((vector signed short){0x0000,0x0002,0x0000,0x0002,0x0000,0x0002,0x0000,0x0002});
+ *s16++ = ((vector signed short){0x0000,0x0003,0x0000,0x0003,0x0000,0x0003,0x0000,0x0003});
+ *s16++ = ((vector signed short){0x0000,0x0004,0x0000,0x0004,0x0000,0x0004,0x0000,0x0004});
+ *s16++ = ((vector signed short){0x0000,0x0005,0x0000,0x0005,0x0000,0x0005,0x0000,0x0005});
+ *s16++ = ((vector signed short){0x0000,0x0006,0x0000,0x0006,0x0000,0x0006,0x0000,0x0006});
+ *s16++ = ((vector signed short){0x0000,0x0007,0x0000,0x0007,0x0000,0x0007,0x0000,0x0007});
+ *s16++ = ((vector signed short){0x0000,0x0008,0x0000,0x0008,0x0000,0x0008,0x0000,0x0008});
+ *s16++ = ((vector signed short){0x0000,0x0009,0x0000,0x0009,0x0000,0x0009,0x0000,0x0009});
+ *s16++ = ((vector signed short){0x0000,0x000a,0x0000,0x000a,0x0000,0x000a,0x0000,0x000a});
+ *s16++ = ((vector signed short){0x0000,0x000b,0x0000,0x000b,0x0000,0x000b,0x0000,0x000b});
+ *s16++ = ((vector signed short){0x0000,0x000c,0x0000,0x000c,0x0000,0x000c,0x0000,0x000c});
+ *s16++ = ((vector signed short){0x0000,0x000d,0x0000,0x000d,0x0000,0x000d,0x0000,0x000d});
+ *s16++ = ((vector signed short){0x0000,0x000e,0x0000,0x000e,0x0000,0x000e,0x0000,0x000e});
+ *s16++ = ((vector signed short){0x0000,0x000f,0x0000,0x000f,0x0000,0x000f,0x0000,0x000f});
+}
+void fs16c(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){0xfff0,0xfff0,0xfff0,0xfff0,0xfff0,0xfff0,0xfff0,0xfff0});
+ *s16++ = ((vector signed short){0xfff1,0xfff1,0xfff1,0xfff1,0xfff1,0xfff1,0xfff1,0xfff1});
+ *s16++ = ((vector signed short){0xfff2,0xfff2,0xfff2,0xfff2,0xfff2,0xfff2,0xfff2,0xfff2});
+ *s16++ = ((vector signed short){0xfff3,0xfff3,0xfff3,0xfff3,0xfff3,0xfff3,0xfff3,0xfff3});
+ *s16++ = ((vector signed short){0xfff4,0xfff4,0xfff4,0xfff4,0xfff4,0xfff4,0xfff4,0xfff4});
+ *s16++ = ((vector signed short){0xfff5,0xfff5,0xfff5,0xfff5,0xfff5,0xfff5,0xfff5,0xfff5});
+ *s16++ = ((vector signed short){0xfff6,0xfff6,0xfff6,0xfff6,0xfff6,0xfff6,0xfff6,0xfff6});
+ *s16++ = ((vector signed short){0xfff7,0xfff7,0xfff7,0xfff7,0xfff7,0xfff7,0xfff7,0xfff7});
+ *s16++ = ((vector signed short){0xfff8,0xfff8,0xfff8,0xfff8,0xfff8,0xfff8,0xfff8,0xfff8});
+ *s16++ = ((vector signed short){0xfff9,0xfff9,0xfff9,0xfff9,0xfff9,0xfff9,0xfff9,0xfff9});
+ *s16++ = ((vector signed short){0xfffa,0xfffa,0xfffa,0xfffa,0xfffa,0xfffa,0xfffa,0xfffa});
+ *s16++ = ((vector signed short){0xfffb,0xfffb,0xfffb,0xfffb,0xfffb,0xfffb,0xfffb,0xfffb});
+ *s16++ = ((vector signed short){0xfffc,0xfffc,0xfffc,0xfffc,0xfffc,0xfffc,0xfffc,0xfffc});
+ *s16++ = ((vector signed short){0xfffd,0xfffd,0xfffd,0xfffd,0xfffd,0xfffd,0xfffd,0xfffd});
+ *s16++ = ((vector signed short){0xfffe,0xfffe,0xfffe,0xfffe,0xfffe,0xfffe,0xfffe,0xfffe});
+ *s16++ = ((vector signed short){0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff});
+}
+void fs16d(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000});
+ *s16++ = ((vector signed short){0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001});
+ *s16++ = ((vector signed short){0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,0x0002});
+ *s16++ = ((vector signed short){0x0003,0x0003,0x0003,0x0003,0x0003,0x0003,0x0003,0x0003});
+ *s16++ = ((vector signed short){0x0004,0x0004,0x0004,0x0004,0x0004,0x0004,0x0004,0x0004});
+ *s16++ = ((vector signed short){0x0005,0x0005,0x0005,0x0005,0x0005,0x0005,0x0005,0x0005});
+ *s16++ = ((vector signed short){0x0006,0x0006,0x0006,0x0006,0x0006,0x0006,0x0006,0x0006});
+ *s16++ = ((vector signed short){0x0007,0x0007,0x0007,0x0007,0x0007,0x0007,0x0007,0x0007});
+ *s16++ = ((vector signed short){0x0008,0x0008,0x0008,0x0008,0x0008,0x0008,0x0008,0x0008});
+ *s16++ = ((vector signed short){0x0009,0x0009,0x0009,0x0009,0x0009,0x0009,0x0009,0x0009});
+ *s16++ = ((vector signed short){0x000a,0x000a,0x000a,0x000a,0x000a,0x000a,0x000a,0x000a});
+ *s16++ = ((vector signed short){0x000b,0x000b,0x000b,0x000b,0x000b,0x000b,0x000b,0x000b});
+ *s16++ = ((vector signed short){0x000c,0x000c,0x000c,0x000c,0x000c,0x000c,0x000c,0x000c});
+ *s16++ = ((vector signed short){0x000d,0x000d,0x000d,0x000d,0x000d,0x000d,0x000d,0x000d});
+ *s16++ = ((vector signed short){0x000e,0x000e,0x000e,0x000e,0x000e,0x000e,0x000e,0x000e});
+ *s16++ = ((vector signed short){0x000f,0x000f,0x000f,0x000f,0x000f,0x000f,0x000f,0x000f});
+}
+void fs16e(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){0xf0f0,0xf0f0,0xf0f0,0xf0f0,0xf0f0,0xf0f0,0xf0f0,0xf0f0});
+ *s16++ = ((vector signed short){0xf1f1,0xf1f1,0xf1f1,0xf1f1,0xf1f1,0xf1f1,0xf1f1,0xf1f1});
+ *s16++ = ((vector signed short){0xf2f2,0xf2f2,0xf2f2,0xf2f2,0xf2f2,0xf2f2,0xf2f2,0xf2f2});
+ *s16++ = ((vector signed short){0xf3f3,0xf3f3,0xf3f3,0xf3f3,0xf3f3,0xf3f3,0xf3f3,0xf3f3});
+ *s16++ = ((vector signed short){0xf4f4,0xf4f4,0xf4f4,0xf4f4,0xf4f4,0xf4f4,0xf4f4,0xf4f4});
+ *s16++ = ((vector signed short){0xf5f5,0xf5f5,0xf5f5,0xf5f5,0xf5f5,0xf5f5,0xf5f5,0xf5f5});
+ *s16++ = ((vector signed short){0xf6f6,0xf6f6,0xf6f6,0xf6f6,0xf6f6,0xf6f6,0xf6f6,0xf6f6});
+ *s16++ = ((vector signed short){0xf7f7,0xf7f7,0xf7f7,0xf7f7,0xf7f7,0xf7f7,0xf7f7,0xf7f7});
+ *s16++ = ((vector signed short){0xf8f8,0xf8f8,0xf8f8,0xf8f8,0xf8f8,0xf8f8,0xf8f8,0xf8f8});
+ *s16++ = ((vector signed short){0xf9f9,0xf9f9,0xf9f9,0xf9f9,0xf9f9,0xf9f9,0xf9f9,0xf9f9});
+ *s16++ = ((vector signed short){0xfafa,0xfafa,0xfafa,0xfafa,0xfafa,0xfafa,0xfafa,0xfafa});
+ *s16++ = ((vector signed short){0xfbfb,0xfbfb,0xfbfb,0xfbfb,0xfbfb,0xfbfb,0xfbfb,0xfbfb});
+ *s16++ = ((vector signed short){0xfcfc,0xfcfc,0xfcfc,0xfcfc,0xfcfc,0xfcfc,0xfcfc,0xfcfc});
+ *s16++ = ((vector signed short){0xfdfd,0xfdfd,0xfdfd,0xfdfd,0xfdfd,0xfdfd,0xfdfd,0xfdfd});
+ *s16++ = ((vector signed short){0xfefe,0xfefe,0xfefe,0xfefe,0xfefe,0xfefe,0xfefe,0xfefe});
+ *s16++ = ((vector signed short){0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff});
+}
+void fs16f(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000});
+ *s16++ = ((vector signed short){0x0101,0x0101,0x0101,0x0101,0x0101,0x0101,0x0101,0x0101});
+ *s16++ = ((vector signed short){0x0202,0x0202,0x0202,0x0202,0x0202,0x0202,0x0202,0x0202});
+ *s16++ = ((vector signed short){0x0303,0x0303,0x0303,0x0303,0x0303,0x0303,0x0303,0x0303});
+ *s16++ = ((vector signed short){0x0404,0x0404,0x0404,0x0404,0x0404,0x0404,0x0404,0x0404});
+ *s16++ = ((vector signed short){0x0505,0x0505,0x0505,0x0505,0x0505,0x0505,0x0505,0x0505});
+ *s16++ = ((vector signed short){0x0606,0x0606,0x0606,0x0606,0x0606,0x0606,0x0606,0x0606});
+ *s16++ = ((vector signed short){0x0707,0x0707,0x0707,0x0707,0x0707,0x0707,0x0707,0x0707});
+ *s16++ = ((vector signed short){0x0808,0x0808,0x0808,0x0808,0x0808,0x0808,0x0808,0x0808});
+ *s16++ = ((vector signed short){0x0909,0x0909,0x0909,0x0909,0x0909,0x0909,0x0909,0x0909});
+ *s16++ = ((vector signed short){0x0a0a,0x0a0a,0x0a0a,0x0a0a,0x0a0a,0x0a0a,0x0a0a,0x0a0a});
+ *s16++ = ((vector signed short){0x0b0b,0x0b0b,0x0b0b,0x0b0b,0x0b0b,0x0b0b,0x0b0b,0x0b0b});
+ *s16++ = ((vector signed short){0x0c0c,0x0c0c,0x0c0c,0x0c0c,0x0c0c,0x0c0c,0x0c0c,0x0c0c});
+ *s16++ = ((vector signed short){0x0d0d,0x0d0d,0x0d0d,0x0d0d,0x0d0d,0x0d0d,0x0d0d,0x0d0d});
+ *s16++ = ((vector signed short){0x0e0e,0x0e0e,0x0e0e,0x0e0e,0x0e0e,0x0e0e,0x0e0e,0x0e0e});
+ *s16++ = ((vector signed short){0x0f0f,0x0f0f,0x0f0f,0x0f0f,0x0f0f,0x0f0f,0x0f0f,0x0f0f});
+}
+void fs16g(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){0,0,0,0,0,0,0,0});
+ *s16++ = ((vector signed short){1,1,1,1,1,1,1,1});
+ *s16++ = ((vector signed short){2,2,2,2,2,2,2,2});
+ *s16++ = ((vector signed short){3,3,3,3,3,3,3,3});
+ *s16++ = ((vector signed short){4,4,4,4,4,4,4,4});
+ *s16++ = ((vector signed short){5,5,5,5,5,5,5,5});
+ *s16++ = ((vector signed short){6,6,6,6,6,6,6,6});
+ *s16++ = ((vector signed short){7,7,7,7,7,7,7,7});
+ *s16++ = ((vector signed short){8,8,8,8,8,8,8,8});
+ *s16++ = ((vector signed short){9,9,9,9,9,9,9,9});
+ *s16++ = ((vector signed short){10,10,10,10,10,10,10,10});
+ *s16++ = ((vector signed short){11,11,11,11,11,11,11,11});
+ *s16++ = ((vector signed short){12,12,12,12,12,12,12,12});
+ *s16++ = ((vector signed short){13,13,13,13,13,13,13,13});
+ *s16++ = ((vector signed short){14,14,14,14,14,14,14,14});
+ *s16++ = ((vector signed short){15,15,15,15,15,15,15,15});
+}
+void fs16h(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){257,257,257,257,257,257,257,257});
+ *s16++ = ((vector signed short){514,514,514,514,514,514,514,514});
+ *s16++ = ((vector signed short){771,771,771,771,771,771,771,771});
+ *s16++ = ((vector signed short){1028,1028,1028,1028,1028,1028,1028,1028});
+ *s16++ = ((vector signed short){1285,1285,1285,1285,1285,1285,1285,1285});
+ *s16++ = ((vector signed short){1542,1542,1542,1542,1542,1542,1542,1542});
+ *s16++ = ((vector signed short){1799,1799,1799,1799,1799,1799,1799,1799});
+ *s16++ = ((vector signed short){2056,2056,2056,2056,2056,2056,2056,2056});
+ *s16++ = ((vector signed short){2313,2313,2313,2313,2313,2313,2313,2313});
+ *s16++ = ((vector signed short){2570,2570,2570,2570,2570,2570,2570,2570});
+ *s16++ = ((vector signed short){2827,2827,2827,2827,2827,2827,2827,2827});
+ *s16++ = ((vector signed short){3084,3084,3084,3084,3084,3084,3084,3084});
+ *s16++ = ((vector signed short){3341,3341,3341,3341,3341,3341,3341,3341});
+ *s16++ = ((vector signed short){3598,3598,3598,3598,3598,3598,3598,3598});
+ *s16++ = ((vector signed short){3855,3855,3855,3855,3855,3855,3855,3855});
+}
+void fs16i(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){61680,61680,61680,61680,61680,61680,61680,61680});
+ *s16++ = ((vector signed short){61937,61937,61937,61937,61937,61937,61937,61937});
+ *s16++ = ((vector signed short){62194,62194,62194,62194,62194,62194,62194,62194});
+ *s16++ = ((vector signed short){62451,62451,62451,62451,62451,62451,62451,62451});
+ *s16++ = ((vector signed short){62708,62708,62708,62708,62708,62708,62708,62708});
+ *s16++ = ((vector signed short){62965,62965,62965,62965,62965,62965,62965,62965});
+ *s16++ = ((vector signed short){63222,63222,63222,63222,63222,63222,63222,63222});
+ *s16++ = ((vector signed short){63479,63479,63479,63479,63479,63479,63479,63479});
+ *s16++ = ((vector signed short){63736,63736,63736,63736,63736,63736,63736,63736});
+ *s16++ = ((vector signed short){63993,63993,63993,63993,63993,63993,63993,63993});
+ *s16++ = ((vector signed short){64250,64250,64250,64250,64250,64250,64250,64250});
+ *s16++ = ((vector signed short){64507,64507,64507,64507,64507,64507,64507,64507});
+ *s16++ = ((vector signed short){64764,64764,64764,64764,64764,64764,64764,64764});
+ *s16++ = ((vector signed short){65021,65021,65021,65021,65021,65021,65021,65021});
+ *s16++ = ((vector signed short){65278,65278,65278,65278,65278,65278,65278,65278});
+}
+void fs16j(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){65520,65520,65520,65520,65520,65520,65520,65520});
+ *s16++ = ((vector signed short){65521,65521,65521,65521,65521,65521,65521,65521});
+ *s16++ = ((vector signed short){65522,65522,65522,65522,65522,65522,65522,65522});
+ *s16++ = ((vector signed short){65523,65523,65523,65523,65523,65523,65523,65523});
+ *s16++ = ((vector signed short){65524,65524,65524,65524,65524,65524,65524,65524});
+ *s16++ = ((vector signed short){65525,65525,65525,65525,65525,65525,65525,65525});
+ *s16++ = ((vector signed short){65526,65526,65526,65526,65526,65526,65526,65526});
+ *s16++ = ((vector signed short){65527,65527,65527,65527,65527,65527,65527,65527});
+ *s16++ = ((vector signed short){65528,65528,65528,65528,65528,65528,65528,65528});
+ *s16++ = ((vector signed short){65529,65529,65529,65529,65529,65529,65529,65529});
+ *s16++ = ((vector signed short){65530,65530,65530,65530,65530,65530,65530,65530});
+ *s16++ = ((vector signed short){65531,65531,65531,65531,65531,65531,65531,65531});
+ *s16++ = ((vector signed short){65532,65532,65532,65532,65532,65532,65532,65532});
+ *s16++ = ((vector signed short){65533,65533,65533,65533,65533,65533,65533,65533});
+ *s16++ = ((vector signed short){65534,65534,65534,65534,65534,65534,65534,65534});
+ *s16++ = ((vector signed short){65535,65535,65535,65535,65535,65535,65535,65535});
+}
+void fs16k(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){-3856,-3856,-3856,-3856,-3856,-3856,-3856,-3856});
+ *s16++ = ((vector signed short){-3599,-3599,-3599,-3599,-3599,-3599,-3599,-3599});
+ *s16++ = ((vector signed short){-3342,-3342,-3342,-3342,-3342,-3342,-3342,-3342});
+ *s16++ = ((vector signed short){-3085,-3085,-3085,-3085,-3085,-3085,-3085,-3085});
+ *s16++ = ((vector signed short){-2828,-2828,-2828,-2828,-2828,-2828,-2828,-2828});
+ *s16++ = ((vector signed short){-2571,-2571,-2571,-2571,-2571,-2571,-2571,-2571});
+ *s16++ = ((vector signed short){-2314,-2314,-2314,-2314,-2314,-2314,-2314,-2314});
+ *s16++ = ((vector signed short){-2057,-2057,-2057,-2057,-2057,-2057,-2057,-2057});
+ *s16++ = ((vector signed short){-1800,-1800,-1800,-1800,-1800,-1800,-1800,-1800});
+ *s16++ = ((vector signed short){-1543,-1543,-1543,-1543,-1543,-1543,-1543,-1543});
+ *s16++ = ((vector signed short){-1286,-1286,-1286,-1286,-1286,-1286,-1286,-1286});
+ *s16++ = ((vector signed short){-1029,-1029,-1029,-1029,-1029,-1029,-1029,-1029});
+ *s16++ = ((vector signed short){-772,-772,-772,-772,-772,-772,-772,-772});
+ *s16++ = ((vector signed short){-515,-515,-515,-515,-515,-515,-515,-515});
+ *s16++ = ((vector signed short){-258,-258,-258,-258,-258,-258,-258,-258});
+}
+void fs16l(vector signed short *s16)
+{
+ *s16++ = ((vector signed short){-16,-16,-16,-16,-16,-16,-16,-16});
+ *s16++ = ((vector signed short){-15,-15,-15,-15,-15,-15,-15,-15});
+ *s16++ = ((vector signed short){-14,-14,-14,-14,-14,-14,-14,-14});
+ *s16++ = ((vector signed short){-13,-13,-13,-13,-13,-13,-13,-13});
+ *s16++ = ((vector signed short){-12,-12,-12,-12,-12,-12,-12,-12});
+ *s16++ = ((vector signed short){-11,-11,-11,-11,-11,-11,-11,-11});
+ *s16++ = ((vector signed short){-10,-10,-10,-10,-10,-10,-10,-10});
+ *s16++ = ((vector signed short){-9,-9,-9,-9,-9,-9,-9,-9});
+ *s16++ = ((vector signed short){-8,-8,-8,-8,-8,-8,-8,-8});
+ *s16++ = ((vector signed short){-7,-7,-7,-7,-7,-7,-7,-7});
+ *s16++ = ((vector signed short){-6,-6,-6,-6,-6,-6,-6,-6});
+ *s16++ = ((vector signed short){-5,-5,-5,-5,-5,-5,-5,-5});
+ *s16++ = ((vector signed short){-4,-4,-4,-4,-4,-4,-4,-4});
+ *s16++ = ((vector signed short){-3,-3,-3,-3,-3,-3,-3,-3});
+ *s16++ = ((vector signed short){-2,-2,-2,-2,-2,-2,-2,-2});
+ *s16++ = ((vector signed short){-1,-1,-1,-1,-1,-1,-1,-1});
+}
+void fs8a(vector signed char *s8)
+{
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xf0,0xff,0xff,0xff,0xf0});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xf1,0xff,0xff,0xff,0xf1});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xf2,0xff,0xff,0xff,0xf2,0xff,0xff,0xff,0xf2,0xff,0xff,0xff,0xf2});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xf3,0xff,0xff,0xff,0xf3,0xff,0xff,0xff,0xf3,0xff,0xff,0xff,0xf3});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xf4,0xff,0xff,0xff,0xf4,0xff,0xff,0xff,0xf4,0xff,0xff,0xff,0xf4});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xf5,0xff,0xff,0xff,0xf5,0xff,0xff,0xff,0xf5,0xff,0xff,0xff,0xf5});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xf6,0xff,0xff,0xff,0xf6,0xff,0xff,0xff,0xf6,0xff,0xff,0xff,0xf6});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xf7,0xff,0xff,0xff,0xf7,0xff,0xff,0xff,0xf7,0xff,0xff,0xff,0xf7});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xf8,0xff,0xff,0xff,0xf8,0xff,0xff,0xff,0xf8,0xff,0xff,0xff,0xf8});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xf9,0xff,0xff,0xff,0xf9,0xff,0xff,0xff,0xf9,0xff,0xff,0xff,0xf9});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xfa,0xff,0xff,0xff,0xfa,0xff,0xff,0xff,0xfa,0xff,0xff,0xff,0xfa});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xfb,0xff,0xff,0xff,0xfb,0xff,0xff,0xff,0xfb,0xff,0xff,0xff,0xfb});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xfc,0xff,0xff,0xff,0xfc,0xff,0xff,0xff,0xfc,0xff,0xff,0xff,0xfc});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xfd,0xff,0xff,0xff,0xfd});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfe,0xff,0xff,0xff,0xfe});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff});
+}
+void fs8b(vector signed char *s8)
+{
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x02});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x03});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x04});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x05});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x06});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x07});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x08});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x09,0x00,0x00,0x00,0x09,0x00,0x00,0x00,0x09,0x00,0x00,0x00,0x09});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x0a});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x0b});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x0c,0x00,0x00,0x00,0x0c,0x00,0x00,0x00,0x0c,0x00,0x00,0x00,0x0c});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x0d,0x00,0x00,0x00,0x0d,0x00,0x00,0x00,0x0d,0x00,0x00,0x00,0x0d});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x0e,0x00,0x00,0x00,0x0e});
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x0f,0x00,0x00,0x00,0x0f});
+}
+void fs8c(vector signed char *s8)
+{
+ *s8++ = ((vector signed char){0xff,0xf0,0xff,0xf0,0xff,0xf0,0xff,0xf0,0xff,0xf0,0xff,0xf0,0xff,0xf0,0xff,0xf0});
+ *s8++ = ((vector signed char){0xff,0xf1,0xff,0xf1,0xff,0xf1,0xff,0xf1,0xff,0xf1,0xff,0xf1,0xff,0xf1,0xff,0xf1});
+ *s8++ = ((vector signed char){0xff,0xf2,0xff,0xf2,0xff,0xf2,0xff,0xf2,0xff,0xf2,0xff,0xf2,0xff,0xf2,0xff,0xf2});
+ *s8++ = ((vector signed char){0xff,0xf3,0xff,0xf3,0xff,0xf3,0xff,0xf3,0xff,0xf3,0xff,0xf3,0xff,0xf3,0xff,0xf3});
+ *s8++ = ((vector signed char){0xff,0xf4,0xff,0xf4,0xff,0xf4,0xff,0xf4,0xff,0xf4,0xff,0xf4,0xff,0xf4,0xff,0xf4});
+ *s8++ = ((vector signed char){0xff,0xf5,0xff,0xf5,0xff,0xf5,0xff,0xf5,0xff,0xf5,0xff,0xf5,0xff,0xf5,0xff,0xf5});
+ *s8++ = ((vector signed char){0xff,0xf6,0xff,0xf6,0xff,0xf6,0xff,0xf6,0xff,0xf6,0xff,0xf6,0xff,0xf6,0xff,0xf6});
+ *s8++ = ((vector signed char){0xff,0xf7,0xff,0xf7,0xff,0xf7,0xff,0xf7,0xff,0xf7,0xff,0xf7,0xff,0xf7,0xff,0xf7});
+ *s8++ = ((vector signed char){0xff,0xf8,0xff,0xf8,0xff,0xf8,0xff,0xf8,0xff,0xf8,0xff,0xf8,0xff,0xf8,0xff,0xf8});
+ *s8++ = ((vector signed char){0xff,0xf9,0xff,0xf9,0xff,0xf9,0xff,0xf9,0xff,0xf9,0xff,0xf9,0xff,0xf9,0xff,0xf9});
+ *s8++ = ((vector signed char){0xff,0xfa,0xff,0xfa,0xff,0xfa,0xff,0xfa,0xff,0xfa,0xff,0xfa,0xff,0xfa,0xff,0xfa});
+ *s8++ = ((vector signed char){0xff,0xfb,0xff,0xfb,0xff,0xfb,0xff,0xfb,0xff,0xfb,0xff,0xfb,0xff,0xfb,0xff,0xfb});
+ *s8++ = ((vector signed char){0xff,0xfc,0xff,0xfc,0xff,0xfc,0xff,0xfc,0xff,0xfc,0xff,0xfc,0xff,0xfc,0xff,0xfc});
+ *s8++ = ((vector signed char){0xff,0xfd,0xff,0xfd,0xff,0xfd,0xff,0xfd,0xff,0xfd,0xff,0xfd,0xff,0xfd,0xff,0xfd});
+ *s8++ = ((vector signed char){0xff,0xfe,0xff,0xfe,0xff,0xfe,0xff,0xfe,0xff,0xfe,0xff,0xfe,0xff,0xfe,0xff,0xfe});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff});
+}
+void fs8d(vector signed char *s8)
+{
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00});
+ *s8++ = ((vector signed char){0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01});
+ *s8++ = ((vector signed char){0x00,0x02,0x00,0x02,0x00,0x02,0x00,0x02,0x00,0x02,0x00,0x02,0x00,0x02,0x00,0x02});
+ *s8++ = ((vector signed char){0x00,0x03,0x00,0x03,0x00,0x03,0x00,0x03,0x00,0x03,0x00,0x03,0x00,0x03,0x00,0x03});
+ *s8++ = ((vector signed char){0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04});
+ *s8++ = ((vector signed char){0x00,0x05,0x00,0x05,0x00,0x05,0x00,0x05,0x00,0x05,0x00,0x05,0x00,0x05,0x00,0x05});
+ *s8++ = ((vector signed char){0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06});
+ *s8++ = ((vector signed char){0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x07});
+ *s8++ = ((vector signed char){0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x08});
+ *s8++ = ((vector signed char){0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x09});
+ *s8++ = ((vector signed char){0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a});
+ *s8++ = ((vector signed char){0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b});
+ *s8++ = ((vector signed char){0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x0c});
+ *s8++ = ((vector signed char){0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d});
+ *s8++ = ((vector signed char){0x00,0x0e,0x00,0x0e,0x00,0x0e,0x00,0x0e,0x00,0x0e,0x00,0x0e,0x00,0x0e,0x00,0x0e});
+ *s8++ = ((vector signed char){0x00,0x0f,0x00,0x0f,0x00,0x0f,0x00,0x0f,0x00,0x0f,0x00,0x0f,0x00,0x0f,0x00,0x0f});
+}
+void fs8e(vector signed char *s8)
+{
+ *s8++ = ((vector signed char){0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00});
+ *s8++ = ((vector signed char){0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01});
+ *s8++ = ((vector signed char){0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02,0x02});
+ *s8++ = ((vector signed char){0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03});
+ *s8++ = ((vector signed char){0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04,0x04});
+ *s8++ = ((vector signed char){0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05});
+ *s8++ = ((vector signed char){0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06});
+ *s8++ = ((vector signed char){0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07,0x07});
+ *s8++ = ((vector signed char){0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08,0x08});
+ *s8++ = ((vector signed char){0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09,0x09});
+ *s8++ = ((vector signed char){0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a,0x0a});
+ *s8++ = ((vector signed char){0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b,0x0b});
+ *s8++ = ((vector signed char){0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c});
+ *s8++ = ((vector signed char){0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d,0x0d});
+ *s8++ = ((vector signed char){0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e,0x0e});
+ *s8++ = ((vector signed char){0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f});
+}
+void fs8f(vector signed char *s8)
+{
+ *s8++ = ((vector signed char){0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0});
+ *s8++ = ((vector signed char){0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1});
+ *s8++ = ((vector signed char){0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2,0xf2});
+ *s8++ = ((vector signed char){0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3});
+ *s8++ = ((vector signed char){0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4,0xf4});
+ *s8++ = ((vector signed char){0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5,0xf5});
+ *s8++ = ((vector signed char){0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6,0xf6});
+ *s8++ = ((vector signed char){0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7});
+ *s8++ = ((vector signed char){0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8,0xf8});
+ *s8++ = ((vector signed char){0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9});
+ *s8++ = ((vector signed char){0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa});
+ *s8++ = ((vector signed char){0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb});
+ *s8++ = ((vector signed char){0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc,0xfc});
+ *s8++ = ((vector signed char){0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd});
+ *s8++ = ((vector signed char){0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe});
+ *s8++ = ((vector signed char){0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff});
+}
+void fs8g(vector signed char *s8)
+{
+ *s8++ = ((vector signed char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0});
+ *s8++ = ((vector signed char){1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
+ *s8++ = ((vector signed char){2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2});
+ *s8++ = ((vector signed char){3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3});
+ *s8++ = ((vector signed char){4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4});
+ *s8++ = ((vector signed char){5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5});
+ *s8++ = ((vector signed char){6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6});
+ *s8++ = ((vector signed char){7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7});
+ *s8++ = ((vector signed char){8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8});
+ *s8++ = ((vector signed char){9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9});
+ *s8++ = ((vector signed char){10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10});
+ *s8++ = ((vector signed char){11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11});
+ *s8++ = ((vector signed char){12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12});
+ *s8++ = ((vector signed char){13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13});
+ *s8++ = ((vector signed char){14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14});
+ *s8++ = ((vector signed char){15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15});
+}
+void fs8h(vector signed char *s8)
+{
+ *s8++ = ((vector signed char){240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240});
+ *s8++ = ((vector signed char){241,241,241,241,241,241,241,241,241,241,241,241,241,241,241,241});
+ *s8++ = ((vector signed char){242,242,242,242,242,242,242,242,242,242,242,242,242,242,242,242});
+ *s8++ = ((vector signed char){243,243,243,243,243,243,243,243,243,243,243,243,243,243,243,243});
+ *s8++ = ((vector signed char){244,244,244,244,244,244,244,244,244,244,244,244,244,244,244,244});
+ *s8++ = ((vector signed char){245,245,245,245,245,245,245,245,245,245,245,245,245,245,245,245});
+ *s8++ = ((vector signed char){246,246,246,246,246,246,246,246,246,246,246,246,246,246,246,246});
+ *s8++ = ((vector signed char){247,247,247,247,247,247,247,247,247,247,247,247,247,247,247,247});
+ *s8++ = ((vector signed char){248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248});
+ *s8++ = ((vector signed char){249,249,249,249,249,249,249,249,249,249,249,249,249,249,249,249});
+ *s8++ = ((vector signed char){250,250,250,250,250,250,250,250,250,250,250,250,250,250,250,250});
+ *s8++ = ((vector signed char){251,251,251,251,251,251,251,251,251,251,251,251,251,251,251,251});
+ *s8++ = ((vector signed char){252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252});
+ *s8++ = ((vector signed char){253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253});
+ *s8++ = ((vector signed char){254,254,254,254,254,254,254,254,254,254,254,254,254,254,254,254});
+ *s8++ = ((vector signed char){255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255});
+}
+void fs8i(vector signed char *s8)
+{
+ *s8++ = ((vector signed char){-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1});
+ *s8++ = ((vector signed char){-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2});
+ *s8++ = ((vector signed char){-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3});
+ *s8++ = ((vector signed char){-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4});
+ *s8++ = ((vector signed char){-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5});
+ *s8++ = ((vector signed char){-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6,-6});
+ *s8++ = ((vector signed char){-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7,-7});
+ *s8++ = ((vector signed char){-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8,-8});
+ *s8++ = ((vector signed char){-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9});
+ *s8++ = ((vector signed char){-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10,-10});
+ *s8++ = ((vector signed char){-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11,-11});
+ *s8++ = ((vector signed char){-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12,-12});
+ *s8++ = ((vector signed char){-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13,-13});
+ *s8++ = ((vector signed char){-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14,-14});
+ *s8++ = ((vector signed char){-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15,-15});
+ *s8++ = ((vector signed char){-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16,-16});
+}
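
The fs16*/fs8* functions above (like the fu32/fu16/fs32 variants earlier in 5-10.c) store long runs of splatted constants through an auto-incrementing pointer, presumably to exercise vector constant materialization: values in -16..15 fit a single vspltish/vspltisb immediate, while wider patterns must be synthesized or loaded from the constant pool. A minimal sketch of the two cases, assuming a PowerPC target built with -maltivec (store_splats is an illustrative name, not part of the test):

  #include <altivec.h>

  void store_splats (vector signed short *p)
  {
    /* Small splat: fits a single vspltish immediate (-16..15).  */
    *p++ = vec_splat_s16 (5);
    /* Wide splat: 257 is 0x0101 per lane, so the compiler must
       synthesize it (e.g. vspltisb 1) or load it from memory.  */
    *p++ = (vector signed short){257,257,257,257,257,257,257,257};
  }
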
diff --git a/gcc/testsuite/gcc.dg/vmx/5-11.c b/gcc/testsuite/gcc.dg/vmx/5-11.c
new file mode 100644
index 0000000000..7d9b3975f5
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/5-11.c
@@ -0,0 +1,289 @@
+/* { dg-do compile } */
+#include <altivec.h>
+extern void fu32(vector unsigned int, vector unsigned int,
+ vector unsigned int, vector unsigned int);
+void fxu32(vector unsigned int u32a, vector unsigned int u32b,
+ vector unsigned int u32c, vector unsigned int u32d)
+{
+ fu32 (u32a,
+ u32b,
+ u32c,
+ vec_avg(u32d, u32d));
+ fu32 (vec_or (u32a, u32a),
+ vec_and (u32b, u32b),
+ vec_max (u32c, u32c),
+ vec_min (u32d, u32d));
+ fu32 (vec_sld (u32a, u32a, 0),
+ vec_sld (u32b, u32b, 0),
+ vec_sld (u32c, u32c, 0),
+ vec_sld (u32d, u32d, 0));
+ fu32 (((vector unsigned int){0,0,0,0}),
+ ((vector unsigned int){0,0,0,0}),
+ ((vector unsigned int){0,0,0,0}),
+ ((vector unsigned int){0,0,0,0}));
+ fu32 (vec_xor(u32a, u32a),
+ vec_andc(u32b, u32b),
+ vec_sub(u32c, u32c),
+ vec_subs(u32d, u32d));
+ fu32 (vec_splat_u32(0),
+ vec_splat_u32(0),
+ vec_splat_u32(0),
+ vec_splat_u32(0));
+ fu32 (((vector unsigned int){0xffffffff,0xffffffff,0xffffffff,0xffffffff}),
+ ((vector unsigned int){0xffffffff,0xffffffff,0xffffffff,0xffffffff}),
+ ((vector unsigned int){0xffffffff,0xffffffff,0xffffffff,0xffffffff}),
+ ((vector unsigned int){0xffffffff,0xffffffff,0xffffffff,0xffffffff}));
+ fu32 (vec_splat_u32(-1),
+ vec_splat_u32(-1),
+ vec_splat_u32(-1),
+ vec_splat_u32(-1));
+ fu32 ((vector unsigned int)vec_cmpeq(u32a, u32a),
+ (vector unsigned int)vec_cmpeq(u32b, u32b),
+ (vector unsigned int)vec_cmpeq(u32c, u32c),
+ (vector unsigned int)vec_cmpeq(u32d, u32d));
+}
+
+extern void fu16(vector unsigned short, vector unsigned short,
+ vector unsigned short, vector unsigned short);
+void fxu16(vector unsigned short u16a, vector unsigned short u16b,
+ vector unsigned short u16c, vector unsigned short u16d)
+{
+ fu16 (u16a,
+ u16b,
+ u16c,
+ vec_avg(u16d, u16d));
+ fu16 (vec_or (u16a, u16a),
+ vec_and (u16b, u16b),
+ vec_max (u16c, u16c),
+ vec_min (u16d, u16d));
+ fu16 (vec_sld (u16a, u16a, 0),
+ vec_sld (u16b, u16b, 0),
+ vec_sld (u16c, u16c, 0),
+ vec_sld (u16d, u16d, 0));
+ fu16 (((vector unsigned short){0,0,0,0,0,0,0,0}),
+ ((vector unsigned short){0,0,0,0,0,0,0,0}),
+ ((vector unsigned short){0,0,0,0,0,0,0,0}),
+ ((vector unsigned short){0,0,0,0,0,0,0,0}));
+ fu16 (vec_xor(u16a, u16a),
+ vec_andc(u16b, u16b),
+ vec_sub(u16c, u16c),
+ vec_subs(u16d, u16d));
+ fu16 (vec_splat_u16(0),
+ vec_splat_u16(0),
+ vec_splat_u16(0),
+ vec_splat_u16(0));
+ fu16 (((vector unsigned short){0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff}),
+ ((vector unsigned short){0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff}),
+ ((vector unsigned short){0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff}),
+ ((vector unsigned short){0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff}));
+ fu16 (vec_splat_u16(-1),
+ vec_splat_u16(-1),
+ vec_splat_u16(-1),
+ vec_splat_u16(-1));
+ fu16 ((vector unsigned short)vec_cmpeq(u16a, u16a),
+ (vector unsigned short)vec_cmpeq(u16b, u16b),
+ (vector unsigned short)vec_cmpeq(u16c, u16c),
+ (vector unsigned short)vec_cmpeq(u16d, u16d));
+}
+
+extern void fu8(vector unsigned char, vector unsigned char,
+ vector unsigned char, vector unsigned char);
+void fxu8(vector unsigned char u8a, vector unsigned char u8b,
+ vector unsigned char u8c, vector unsigned char u8d)
+{
+ fu8 (u8a,
+ u8b,
+ u8c,
+ vec_avg(u8d, u8d));
+ fu8 (vec_or (u8a, u8a),
+ vec_and (u8b, u8b),
+ vec_max (u8c, u8c),
+ vec_min (u8d, u8d));
+ fu8 (vec_sld (u8a, u8a, 0),
+ vec_sld (u8b, u8b, 0),
+ vec_sld (u8c, u8c, 0),
+ vec_sld (u8d, u8d, 0));
+ fu8 (((vector unsigned char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}),
+ ((vector unsigned char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}),
+ ((vector unsigned char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}),
+ ((vector unsigned char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}));
+ fu8 (vec_xor(u8a, u8a),
+ vec_andc(u8b, u8b),
+ vec_sub(u8c, u8c),
+ vec_subs(u8d, u8d));
+ fu8 (vec_splat_u8(0),
+ vec_splat_u8(0),
+ vec_splat_u8(0),
+ vec_splat_u8(0));
+ fu8 (((vector unsigned char){0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}),
+ ((vector unsigned char){0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}),
+ ((vector unsigned char){0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}),
+ ((vector unsigned char){0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}));
+ fu8 (vec_splat_u8(-1),
+ vec_splat_u8(-1),
+ vec_splat_u8(-1),
+ vec_splat_u8(-1));
+ fu8 ((vector unsigned char)vec_cmpeq(u8a, u8a),
+ (vector unsigned char)vec_cmpeq(u8b, u8b),
+ (vector unsigned char)vec_cmpeq(u8c, u8c),
+ (vector unsigned char)vec_cmpeq(u8d, u8d));
+}
+
+extern void fs32(vector signed int, vector signed int,
+ vector signed int, vector signed int);
+void fxs32(vector signed int s32a, vector signed int s32b,
+ vector signed int s32c, vector signed int s32d)
+{
+ fs32 (s32a,
+ s32b,
+ s32c,
+ vec_avg(s32d, s32d));
+ fs32 (vec_or (s32a, s32a),
+ vec_and (s32b, s32b),
+ vec_max (s32c, s32c),
+ vec_min (s32d, s32d));
+ fs32 (vec_sld (s32a, s32a, 0),
+ vec_sld (s32b, s32b, 0),
+ vec_sld (s32c, s32c, 0),
+ vec_sld (s32d, s32d, 0));
+ fs32 (((vector signed int){0,0,0,0}),
+ ((vector signed int){0,0,0,0}),
+ ((vector signed int){0,0,0,0}),
+ ((vector signed int){0,0,0,0}));
+ fs32 (vec_xor(s32a, s32a),
+ vec_andc(s32b, s32b),
+ vec_sub(s32c, s32c),
+ vec_subs(s32d, s32d));
+ fs32 (vec_splat_s32(0),
+ vec_splat_s32(0),
+ vec_splat_s32(0),
+ vec_splat_s32(0));
+ fs32 (((vector signed int){-1,-1,-1,-1}),
+ ((vector signed int){-1,-1,-1,-1}),
+ ((vector signed int){-1,-1,-1,-1}),
+ ((vector signed int){-1,-1,-1,-1}));
+ fs32 (vec_splat_s32(-1),
+ vec_splat_s32(-1),
+ vec_splat_s32(-1),
+ vec_splat_s32(-1));
+ fs32 ((vector signed int)vec_cmpeq(s32a, s32a),
+ (vector signed int)vec_cmpeq(s32b, s32b),
+ (vector signed int)vec_cmpeq(s32c, s32c),
+ (vector signed int)vec_cmpeq(s32d, s32d));
+}
+
+extern void fs16(vector signed short, vector signed short,
+ vector signed short, vector signed short);
+void fxs16(vector signed short s16a, vector signed short s16b,
+ vector signed short s16c, vector signed short s16d)
+{
+ fs16 (s16a,
+ s16b,
+ s16c,
+ vec_avg(s16d, s16d));
+ fs16 (vec_or (s16a, s16a),
+ vec_and (s16b, s16b),
+ vec_max (s16c, s16c),
+ vec_min (s16d, s16d));
+ fs16 (vec_sld (s16a, s16a, 0),
+ vec_sld (s16b, s16b, 0),
+ vec_sld (s16c, s16c, 0),
+ vec_sld (s16d, s16d, 0));
+ fs16 (((vector signed short){0,0,0,0,0,0,0,0}),
+ ((vector signed short){0,0,0,0,0,0,0,0}),
+ ((vector signed short){0,0,0,0,0,0,0,0}),
+ ((vector signed short){0,0,0,0,0,0,0,0}));
+ fs16 (vec_xor(s16a, s16a),
+ vec_andc(s16b, s16b),
+ vec_sub(s16c, s16c),
+ vec_subs(s16d, s16d));
+ fs16 (vec_splat_s16(0),
+ vec_splat_s16(0),
+ vec_splat_s16(0),
+ vec_splat_s16(0));
+ fs16 (((vector signed short){-1,-1,-1,-1,-1,-1,-1,-1}),
+ ((vector signed short){-1,-1,-1,-1,-1,-1,-1,-1}),
+ ((vector signed short){-1,-1,-1,-1,-1,-1,-1,-1}),
+ ((vector signed short){-1,-1,-1,-1,-1,-1,-1,-1}));
+ fs16 (vec_splat_s16(-1),
+ vec_splat_s16(-1),
+ vec_splat_s16(-1),
+ vec_splat_s16(-1));
+ fs16 ((vector signed short)vec_cmpeq(s16a, s16a),
+ (vector signed short)vec_cmpeq(s16b, s16b),
+ (vector signed short)vec_cmpeq(s16c, s16c),
+ (vector signed short)vec_cmpeq(s16d, s16d));
+}
+
+extern void fs8(vector signed char, vector signed char,
+ vector signed char, vector signed char);
+void fxs8(vector signed char s8a, vector signed char s8b,
+ vector signed char s8c, vector signed char s8d)
+{
+ fs8 (s8a,
+ s8b,
+ s8c,
+ vec_avg(s8d, s8d));
+ fs8 (vec_or (s8a, s8a),
+ vec_and (s8b, s8b),
+ vec_max (s8c, s8c),
+ vec_min (s8d, s8d));
+ fs8 (vec_sld (s8a, s8a, 0),
+ vec_sld (s8b, s8b, 0),
+ vec_sld (s8c, s8c, 0),
+ vec_sld (s8d, s8d, 0));
+ fs8 (((vector signed char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}),
+ ((vector signed char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}),
+ ((vector signed char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}),
+ ((vector signed char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}));
+ fs8 (vec_xor(s8a, s8a),
+ vec_andc(s8b, s8b),
+ vec_sub(s8c, s8c),
+ vec_subs(s8d, s8d));
+ fs8 (vec_splat_s8(0),
+ vec_splat_s8(0),
+ vec_splat_s8(0),
+ vec_splat_s8(0));
+ fs8 (((vector signed char){-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}),
+ ((vector signed char){-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}),
+ ((vector signed char){-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}),
+ ((vector signed char){-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}));
+ fs8 (vec_splat_s8(-1),
+ vec_splat_s8(-1),
+ vec_splat_s8(-1),
+ vec_splat_s8(-1));
+ fs8 ((vector signed char)vec_cmpeq(s8a, s8a),
+ (vector signed char)vec_cmpeq(s8b, s8b),
+ (vector signed char)vec_cmpeq(s8c, s8c),
+ (vector signed char)vec_cmpeq(s8d, s8d));
+}
+
+void fu32(vector unsigned int a, vector unsigned int b,
+ vector unsigned int c, vector unsigned int d)
+{
+}
+
+void fu16(vector unsigned short a, vector unsigned short b,
+ vector unsigned short c, vector unsigned short d)
+{
+}
+
+void fu8(vector unsigned char a, vector unsigned char b,
+ vector unsigned char c, vector unsigned char d)
+{
+}
+
+void fs32(vector signed int a, vector signed int b,
+ vector signed int c, vector signed int d)
+{
+}
+
+void fs16(vector signed short a, vector signed short b,
+ vector signed short c, vector signed short d)
+{
+}
+
+void fs8(vector signed char a, vector signed char b,
+ vector signed char c, vector signed char d)
+{
+}
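
Each argument group in 5-11.c reduces either to the incoming value (vec_or, vec_and, vec_max, vec_min, vec_avg and vec_sld of a value with itself are all identities) or to an all-zero/all-ones constant spelled several ways: a braced literal, vec_splat_*, vec_xor/vec_andc/vec_sub/vec_subs of a value with itself, and a self vec_cmpeq. Presumably every spelling of the constant should fold to the same splat instruction. A condensed sketch of the two constant idioms for one element type, assuming -maltivec:

  #include <altivec.h>

  vector unsigned int zeros (vector unsigned int x)
  {
    return vec_xor (x, x);  /* x ^ x is 0 in every lane */
  }

  vector unsigned int ones (vector unsigned int x)
  {
    /* x == x holds lane-wise, so the compare sets all bits.  */
    return (vector unsigned int) vec_cmpeq (x, x);
  }
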
diff --git a/gcc/testsuite/gcc.dg/vmx/7-01.c b/gcc/testsuite/gcc.dg/vmx/7-01.c
new file mode 100644
index 0000000000..f986d8fe5b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/7-01.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+#include <altivec.h>
+extern vector signed short image[];
+extern vector signed short band[];
+
+#define load(a,b) (a[b])
+#define store(v,a,b) (a[b]) = (v)
+
+void
+haar (vector signed char a, vector signed char b, vector signed char c,
+ vector signed char d, unsigned int N, int XX)
+{
+ unsigned int i;
+ vector unsigned char high, low;
+ vector signed int zero = ((vector signed int){0,0,0,0});
+
+ for (i = 0; i < N; i++) {
+ high = (vector unsigned char) (vec_vmrghh (load(image, i+XX),
+ load(image, i)));
+ low = (vector unsigned char) (vec_vmrglh (load(image, i+XX),
+ load(image, i)));
+
+ store (vec_vpkswss (vec_vmsummbm (a, high, zero),
+ vec_vmsummbm (a, low, zero)),
+ band, i);
+ store (vec_vpkswss (vec_vmsummbm (b, high, zero),
+ vec_vmsummbm (b, low, zero)),
+ band, i+1);
+ store(vec_vpkswss (vec_vmsummbm (c, high, zero),
+ vec_vmsummbm (c, low, zero)),
+ band, i+2);
+ store(vec_vpkswss (vec_vmsummbm (d, high, zero),
+ vec_vmsummbm (d, low, zero)),
+ band, i+3);
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/7-01a.c b/gcc/testsuite/gcc.dg/vmx/7-01a.c
new file mode 100644
index 0000000000..c09835b437
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/7-01a.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+#include <altivec.h>
+extern vector signed short image[];
+extern vector signed short band[];
+
+#define load(a,b) vec_ld((b)*16, a)
+#define store(v,a,b) vec_st(v,(b)*16,a)
+
+void
+haar (vector signed char a, vector signed char b, vector signed char c,
+ vector signed char d, unsigned int N, int XX)
+{
+ unsigned int i;
+ vector unsigned char high, low;
+ vector signed int zero = ((vector signed int){0,0,0,0});
+
+ for (i = 0; i < N; i++) {
+ high = (vector unsigned char) (vec_vmrghh (load(image, i+XX),
+ load(image, i)));
+ low = (vector unsigned char) (vec_vmrglh (load(image, i+XX),
+ load(image, i)));
+
+ store (vec_vpkswss (vec_vmsummbm (a, high, zero),
+ vec_vmsummbm (a, low, zero)),
+ band, i);
+ store (vec_vpkswss (vec_vmsummbm (b, high, zero),
+ vec_vmsummbm (b, low, zero)),
+ band, i+1);
+ store(vec_vpkswss (vec_vmsummbm (c, high, zero),
+ vec_vmsummbm (c, low, zero)),
+ band, i+2);
+ store(vec_vpkswss (vec_vmsummbm (d, high, zero),
+ vec_vmsummbm (d, low, zero)),
+ band, i+3);
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/7c-01.c b/gcc/testsuite/gcc.dg/vmx/7c-01.c
new file mode 100644
index 0000000000..68e07aa305
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/7c-01.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector float
+f(int i)
+{
+ switch (i) {
+ case 0:
+ return (vector float)(((vector unsigned char){3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3}));
+ }
+ return ((vector float){0,0,0,0});
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/7d-01.c b/gcc/testsuite/gcc.dg/vmx/7d-01.c
new file mode 100644
index 0000000000..dac5151add
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/7d-01.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+#include <altivec.h>
+extern vector unsigned char a[];
+
+vector unsigned char
+f(vector unsigned char *p, int i, int b)
+{
+ if (b)
+ return p[i];
+ return vec_ld(i*16,p);
+}
+
+vector unsigned char
+g(int i, int b)
+{
+ if (b)
+ return a[i];
+ return vec_ld(i*16,a);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/7d-02.c b/gcc/testsuite/gcc.dg/vmx/7d-02.c
new file mode 100644
index 0000000000..6294cc99ef
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/7d-02.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+#include <altivec.h>
+extern vector unsigned char a[];
+
+void f
+(vector unsigned char v, vector unsigned char *p, int i, int b)
+{
+ if (b)
+ p[i] = v;
+ else
+ vec_st(v, i*16,p);
+}
+
+void g
+(vector unsigned char v, int i, int b)
+{
+ if (b)
+ a[i] = v;
+ else
+ vec_st(v,i*16,a);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/8-01.c b/gcc/testsuite/gcc.dg/vmx/8-01.c
new file mode 100644
index 0000000000..d65815e3a6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/8-01.c
@@ -0,0 +1,4 @@
+/* { dg-do compile } */
+#include <altivec.h>
+
+int i = vec_step(vector unsigned short);
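
vec_step is the AltiVec extension that yields the number of elements in a vector type as an integer constant expression, which is why 8-01.c can use it in a file-scope initializer: vector unsigned short has eight 16-bit lanes, so i is 8. A short illustration of the rule vec_step(T) == 16 / sizeof(element), assuming -maltivec:

  #include <altivec.h>

  int n8  = vec_step (vector unsigned char);   /* 16 */
  int n16 = vec_step (vector unsigned short);  /*  8 */
  int n32 = vec_step (vector unsigned int);    /*  4 */
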
diff --git a/gcc/testsuite/gcc.dg/vmx/8-02.c b/gcc/testsuite/gcc.dg/vmx/8-02.c
new file mode 100644
index 0000000000..72a4a907ad
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/8-02.c
@@ -0,0 +1,299 @@
+#include "harness.h"
+
+extern vector unsigned char u8;
+extern vector signed char s8;
+extern vector bool char b8;
+extern vector unsigned short u16;
+extern vector signed short s16;
+extern vector bool short b16;
+extern vector unsigned int u32;
+extern vector signed int s32;
+extern vector bool int b32;
+extern vector float f32;
+extern vector pixel p16;
+extern vector unsigned char const u8c;
+extern vector signed char const s8c;
+extern vector bool char const b8c;
+extern vector unsigned short const u16c;
+extern vector signed short const s16c;
+extern vector bool short const b16c;
+extern vector unsigned int const u32c;
+extern vector signed int const s32c;
+extern vector bool int const b32c;
+extern vector float const f32c;
+extern vector pixel const p16c;
+extern vector unsigned char volatile u8v;
+extern vector signed char volatile s8v;
+extern vector bool char volatile b8v;
+extern vector unsigned short volatile u16v;
+extern vector signed short volatile s16v;
+extern vector bool short volatile b16v;
+extern vector unsigned int volatile u32v;
+extern vector signed int volatile s32v;
+extern vector bool int volatile b32v;
+extern vector float volatile f32v;
+extern vector pixel volatile p16v;
+extern const vector unsigned char u8c_;
+extern const vector signed char s8c_;
+extern const vector bool char b8c_;
+extern const vector unsigned short u16c_;
+extern const vector signed short s16c_;
+extern const vector bool short b16c_;
+extern const vector unsigned int u32c_;
+extern const vector signed int s32c_;
+extern const vector bool int b32c_;
+extern const vector float f32c_;
+extern const vector pixel p16c_;
+extern volatile vector unsigned char u8v_;
+extern volatile vector signed char s8v_;
+extern volatile vector bool char b8v_;
+extern volatile vector unsigned short u16v_;
+extern volatile vector signed short s16v_;
+extern volatile vector bool short b16v_;
+extern volatile vector unsigned int u32v_;
+extern volatile vector signed int s32v_;
+extern volatile vector bool int b32v_;
+extern volatile vector float f32v_;
+extern volatile vector pixel p16v_;
+int i_u8 = vec_step(u8);
+int i_s8 = vec_step(s8);
+int i_b8 = vec_step(b8);
+int i_u16 = vec_step(u16);
+int i_s16 = vec_step(s16);
+int i_b16 = vec_step(b16);
+int i_u32 = vec_step(u32);
+int i_s32 = vec_step(s32);
+int i_b32 = vec_step(b32);
+int i_f32 = vec_step(f32);
+int i_p16 = vec_step(p16);
+int i_u8c = vec_step(u8c);
+int i_s8c = vec_step(s8c);
+int i_b8c = vec_step(b8c);
+int i_u16c = vec_step(u16c);
+int i_s16c = vec_step(s16c);
+int i_b16c = vec_step(b16c);
+int i_u32c = vec_step(u32c);
+int i_s32c = vec_step(s32c);
+int i_b32c = vec_step(b32c);
+int i_f32c = vec_step(f32c);
+int i_p16c = vec_step(p16c);
+int i_u8v = vec_step(u8v);
+int i_s8v = vec_step(s8v);
+int i_b8v = vec_step(b8v);
+int i_u16v = vec_step(u16v);
+int i_s16v = vec_step(s16v);
+int i_b16v = vec_step(b16v);
+int i_u32v = vec_step(u32v);
+int i_s32v = vec_step(s32v);
+int i_b32v = vec_step(b32v);
+int i_f32v = vec_step(f32v);
+int i_p16v = vec_step(p16v);
+int i_u8c_ = vec_step(u8c_);
+int i_s8c_ = vec_step(s8c_);
+int i_b8c_ = vec_step(b8c_);
+int i_u16c_ = vec_step(u16c_);
+int i_s16c_ = vec_step(s16c_);
+int i_b16c_ = vec_step(b16c_);
+int i_u32c_ = vec_step(u32c_);
+int i_s32c_ = vec_step(s32c_);
+int i_b32c_ = vec_step(b32c_);
+int i_f32c_ = vec_step(f32c_);
+int i_p16c_ = vec_step(p16c_);
+int i_u8v_ = vec_step(u8v_);
+int i_s8v_ = vec_step(s8v_);
+int i_b8v_ = vec_step(b8v_);
+int i_u16v_ = vec_step(u16v_);
+int i_s16v_ = vec_step(s16v_);
+int i_b16v_ = vec_step(b16v_);
+int i_u32v_ = vec_step(u32v_);
+int i_s32v_ = vec_step(s32v_);
+int i_b32v_ = vec_step(b32v_);
+int i_f32v_ = vec_step(f32v_);
+int i_p16v_ = vec_step(p16v_);
+int j_u8 = vec_step(vector unsigned char);
+int j_s8 = vec_step(vector signed char);
+int j_b8 = vec_step(vector bool char);
+int j_u16 = vec_step(vector unsigned short);
+int j_s16 = vec_step(vector signed short);
+int j_b16 = vec_step(vector bool short);
+int j_u32 = vec_step(vector unsigned int);
+int j_s32 = vec_step(vector signed int);
+int j_b32 = vec_step(vector bool int);
+int j_f32 = vec_step(vector float);
+int j_p16 = vec_step(vector pixel);
+int j_u8c = vec_step(vector unsigned char const);
+int j_s8c = vec_step(vector signed char const);
+int j_b8c = vec_step(vector bool char const);
+int j_u16c = vec_step(vector unsigned short const);
+int j_s16c = vec_step(vector signed short const);
+int j_b16c = vec_step(vector bool short const);
+int j_u32c = vec_step(vector unsigned int const);
+int j_s32c = vec_step(vector signed int const);
+int j_b32c = vec_step(vector bool int const);
+int j_f32c = vec_step(vector float const);
+int j_p16c = vec_step(vector pixel const);
+int j_u8v = vec_step(vector unsigned char volatile);
+int j_s8v = vec_step(vector signed char volatile);
+int j_b8v = vec_step(vector bool char volatile);
+int j_u16v = vec_step(vector unsigned short volatile);
+int j_s16v = vec_step(vector signed short volatile);
+int j_b16v = vec_step(vector bool short volatile);
+int j_u32v = vec_step(vector unsigned int volatile);
+int j_s32v = vec_step(vector signed int volatile);
+int j_b32v = vec_step(vector bool int volatile);
+int j_f32v = vec_step(vector float volatile);
+int j_p16v = vec_step(vector pixel volatile);
+int j_u8c_ = vec_step(const vector unsigned char);
+int j_s8c_ = vec_step(const vector signed char);
+int j_b8c_ = vec_step(const vector bool char);
+int j_u16c_ = vec_step(const vector unsigned short);
+int j_s16c_ = vec_step(const vector signed short);
+int j_b16c_ = vec_step(const vector bool short);
+int j_u32c_ = vec_step(const vector unsigned int);
+int j_s32c_ = vec_step(const vector signed int);
+int j_b32c_ = vec_step(const vector bool int);
+int j_f32c_ = vec_step(const vector float);
+int j_p16c_ = vec_step(const vector pixel);
+int j_u8v_ = vec_step(volatile vector unsigned char);
+int j_s8v_ = vec_step(volatile vector signed char);
+int j_b8v_ = vec_step(volatile vector bool char);
+int j_u16v_ = vec_step(volatile vector unsigned short);
+int j_s16v_ = vec_step(volatile vector signed short);
+int j_b16v_ = vec_step(volatile vector bool short);
+int j_u32v_ = vec_step(volatile vector unsigned int);
+int j_s32v_ = vec_step(volatile vector signed int);
+int j_b32v_ = vec_step(volatile vector bool int);
+int j_f32v_ = vec_step(volatile vector float);
+int j_p16v_ = vec_step(volatile vector pixel);
+
+static void test()
+{
+ int i_u8 = vec_step(u8);
+ int i_s8 = vec_step(s8);
+ int i_b8 = vec_step(b8);
+ int i_u16 = vec_step(u16);
+ int i_s16 = vec_step(s16);
+ int i_b16 = vec_step(b16);
+ int i_u32 = vec_step(u32);
+ int i_s32 = vec_step(s32);
+ int i_b32 = vec_step(b32);
+ int i_f32 = vec_step(f32);
+ int i_p16 = vec_step(p16);
+ int i_u8c = vec_step(u8c);
+ int i_s8c = vec_step(s8c);
+ int i_b8c = vec_step(b8c);
+ int i_u16c = vec_step(u16c);
+ int i_s16c = vec_step(s16c);
+ int i_b16c = vec_step(b16c);
+ int i_u32c = vec_step(u32c);
+ int i_s32c = vec_step(s32c);
+ int i_b32c = vec_step(b32c);
+ int i_f32c = vec_step(f32c);
+ int i_p16c = vec_step(p16c);
+ int i_u8v = vec_step(u8v);
+ int i_s8v = vec_step(s8v);
+ int i_b8v = vec_step(b8v);
+ int i_u16v = vec_step(u16v);
+ int i_s16v = vec_step(s16v);
+ int i_b16v = vec_step(b16v);
+ int i_u32v = vec_step(u32v);
+ int i_s32v = vec_step(s32v);
+ int i_b32v = vec_step(b32v);
+ int i_f32v = vec_step(f32v);
+ int i_p16v = vec_step(p16v);
+ int i_u8c_ = vec_step(u8c_);
+ int i_s8c_ = vec_step(s8c_);
+ int i_b8c_ = vec_step(b8c_);
+ int i_u16c_ = vec_step(u16c_);
+ int i_s16c_ = vec_step(s16c_);
+ int i_b16c_ = vec_step(b16c_);
+ int i_u32c_ = vec_step(u32c_);
+ int i_s32c_ = vec_step(s32c_);
+ int i_b32c_ = vec_step(b32c_);
+ int i_f32c_ = vec_step(f32c_);
+ int i_p16c_ = vec_step(p16c_);
+ int i_u8v_ = vec_step(u8v_);
+ int i_s8v_ = vec_step(s8v_);
+ int i_b8v_ = vec_step(b8v_);
+ int i_u16v_ = vec_step(u16v_);
+ int i_s16v_ = vec_step(s16v_);
+ int i_b16v_ = vec_step(b16v_);
+ int i_u32v_ = vec_step(u32v_);
+ int i_s32v_ = vec_step(s32v_);
+ int i_b32v_ = vec_step(b32v_);
+ int i_f32v_ = vec_step(f32v_);
+ int i_p16v_ = vec_step(p16v_);
+ int j_u8 = vec_step(vector unsigned char);
+ int j_s8 = vec_step(vector signed char);
+ int j_b8 = vec_step(vector bool char);
+ int j_u16 = vec_step(vector unsigned short);
+ int j_s16 = vec_step(vector signed short);
+ int j_b16 = vec_step(vector bool short);
+ int j_u32 = vec_step(vector unsigned int);
+ int j_s32 = vec_step(vector signed int);
+ int j_b32 = vec_step(vector bool int);
+ int j_f32 = vec_step(vector float);
+ int j_p16 = vec_step(vector pixel);
+ int j_u8c = vec_step(vector unsigned char const);
+ int j_s8c = vec_step(vector signed char const);
+ int j_b8c = vec_step(vector bool char const);
+ int j_u16c = vec_step(vector unsigned short const);
+ int j_s16c = vec_step(vector signed short const);
+ int j_b16c = vec_step(vector bool short const);
+ int j_u32c = vec_step(vector unsigned int const);
+ int j_s32c = vec_step(vector signed int const);
+ int j_b32c = vec_step(vector bool int const);
+ int j_f32c = vec_step(vector float const);
+ int j_p16c = vec_step(vector pixel const);
+ int j_u8v = vec_step(vector unsigned char volatile);
+ int j_s8v = vec_step(vector signed char volatile);
+ int j_b8v = vec_step(vector bool char volatile);
+ int j_u16v = vec_step(vector unsigned short volatile);
+ int j_s16v = vec_step(vector signed short volatile);
+ int j_b16v = vec_step(vector bool short volatile);
+ int j_u32v = vec_step(vector unsigned int volatile);
+ int j_s32v = vec_step(vector signed int volatile);
+ int j_b32v = vec_step(vector bool int volatile);
+ int j_f32v = vec_step(vector float volatile);
+ int j_p16v = vec_step(vector pixel volatile);
+ int j_u8c_ = vec_step(const vector unsigned char);
+ int j_s8c_ = vec_step(const vector signed char);
+ int j_b8c_ = vec_step(const vector bool char);
+ int j_u16c_ = vec_step(const vector unsigned short);
+ int j_s16c_ = vec_step(const vector signed short);
+ int j_b16c_ = vec_step(const vector bool short);
+ int j_u32c_ = vec_step(const vector unsigned int);
+ int j_s32c_ = vec_step(const vector signed int);
+ int j_b32c_ = vec_step(const vector bool int);
+ int j_f32c_ = vec_step(const vector float);
+ int j_p16c_ = vec_step(const vector pixel);
+ int j_u8v_ = vec_step(volatile vector unsigned char);
+ int j_s8v_ = vec_step(volatile vector signed char);
+ int j_b8v_ = vec_step(volatile vector bool char);
+ int j_u16v_ = vec_step(volatile vector unsigned short);
+ int j_s16v_ = vec_step(volatile vector signed short);
+ int j_b16v_ = vec_step(volatile vector bool short);
+ int j_u32v_ = vec_step(volatile vector unsigned int);
+ int j_s32v_ = vec_step(volatile vector signed int);
+ int j_b32v_ = vec_step(volatile vector bool int);
+ int j_f32v_ = vec_step(volatile vector float);
+ int j_p16v_ = vec_step(volatile vector pixel);
+ check((i_u8 + i_s8 + i_b8 + i_u16 + i_s16 + i_b16 + i_u32 + i_s32 +
+ i_b32 + i_f32 + i_p16 + i_u8c + i_s8c + i_b8c + i_u16c +
+ i_s16c + i_b16c + i_u32c + i_s32c + i_b32c + i_f32c + i_p16c
+ + i_u8v + i_s8v + i_b8v + i_u16v + i_s16v + i_b16v + i_u32v
+ + i_s32v + i_b32v + i_f32v + i_p16v + i_u8c_ + i_s8c_ +
+ i_b8c_ + i_u16c_ + i_s16c_ + i_b16c_ + i_u32c_ + i_s32c_ +
+ i_b32c_ + i_f32c_ + i_p16c_ + i_u8v_ + i_s8v_ + i_b8v_ +
+ i_u16v_ + i_s16v_ + i_b16v_ + i_u32v_ + i_s32v_ + i_b32v_ +
+ i_f32v_ + i_p16v_ + j_u8 + j_s8 + j_b8 + j_u16 + j_s16 +
+ j_b16 + j_u32 + j_s32 + j_b32 + j_f32 + j_p16 + j_u8c +
+ j_s8c + j_b8c + j_u16c + j_s16c + j_b16c + j_u32c + j_s32c +
+ j_b32c + j_f32c + j_p16c + j_u8v + j_s8v + j_b8v + j_u16v +
+ j_s16v + j_b16v + j_u32v + j_s32v + j_b32v + j_f32v + j_p16v
+ + j_u8c_ + j_s8c_ + j_b8c_ + j_u16c_ + j_s16c_ + j_b16c_ +
+ j_u32c_ + j_s32c_ + j_b32c_ + j_f32c_ + j_p16c_ + j_u8v_ +
+ j_s8v_ + j_b8v_ + j_u16v_ + j_s16v_ + j_b16v_ + j_u32v_ +
+ j_s32v_ + j_b32v_ + j_f32v_ + j_p16v_) == 960,
+ "vec_step");
+}
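
The expected sum 960 follows from the element counts: each block of eleven types contributes 3*16 (char vectors) + 3*8 (short vectors) + 4*4 (int and float vectors) + 8 (pixel) = 96, and there are ten such blocks (the i_* and j_* sets, each in plain, trailing-const, trailing-volatile, leading-const and leading-volatile spellings), so 96 * 10 = 960.
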
diff --git a/gcc/testsuite/gcc.dg/vmx/8-02a.c b/gcc/testsuite/gcc.dg/vmx/8-02a.c
new file mode 100644
index 0000000000..cbea6ca6ba
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/8-02a.c
@@ -0,0 +1,17 @@
+#include "harness.h"
+
+extern vector pixel p16;
+extern vector pixel const p16c;
+extern vector pixel volatile p16v;
+extern const vector pixel p16c_;
+extern volatile vector pixel p16v_;
+
+static void test()
+{
+ int i_p16 = vec_step(p16);
+ int i_p16c = vec_step(p16c);
+ int i_p16v = vec_step(p16v);
+ int i_p16c_ = vec_step(p16c_);
+ int i_p16v_ = vec_step(p16v_);
+ check((i_p16 + i_p16c + i_p16v + i_p16c_ + i_p16v_) == 40, "vec_step");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/brode-1.c b/gcc/testsuite/gcc.dg/vmx/brode-1.c
new file mode 100644
index 0000000000..234192bac9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/brode-1.c
@@ -0,0 +1,10 @@
+#include <altivec.h>
+int main ()
+{
+  static int a[3][5][7];
+  {
+    vector signed int a4v;
+    a4v = vec_ldl (0, &a[0][0][0]);
+  }
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/bug-1.c b/gcc/testsuite/gcc.dg/vmx/bug-1.c
new file mode 100644
index 0000000000..fdf392d5f9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/bug-1.c
@@ -0,0 +1,39 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <altivec.h>
+
+#define NPAGES 20
+#define NSKIP 10
+static vector float big[NPAGES*4096/16] = { {1,1,1,1} };
+/* NPAGES pages worth. */
+
+static int failed;
+
+static void f(vector float *p)
+{
+ int i = 1;
+ p = (vector float *)(((ptrdiff_t)p + 4095) & ~4095);
+
+ i += NSKIP;
+ p += NSKIP*4096/16;
+
+ while (i < NPAGES)
+ {
+ if (!vec_all_eq(*p,((vector float){0,0,0,0})))
+ {
+ printf("*p isn't zero at 0x%p, page %d\n", p, i);
+ failed++;
+ }
+ i++;
+ p += 4096/16;
+ }
+}
+
+int main(void)
+{
+ f(big);
+ if (failed)
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/bug-2.c b/gcc/testsuite/gcc.dg/vmx/bug-2.c
new file mode 100644
index 0000000000..5cdbc9856c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/bug-2.c
@@ -0,0 +1,24 @@
+#include "harness.h"
+
+static vector unsigned char value =
+ { 0x7c, 0x12, 0x1, 0xd5,
+ 0xc3, 0x99, 0x21, 0xe2,
+ 0x12, 0x57, 0xde, 0x6b,
+ 0x39, 0x66, 0xa8, 0x87 };
+
+void initn_c (int p1, int p2, signed char p3, int p4, double p5,
+ vector unsigned char p6, signed char p7)
+{
+ check(p1 == 3, "p1");
+ check(p2 == 4, "p2");
+ check(p3 == 5, "p3");
+ check(p4 == 6, "p4");
+ check(p5 == 1, "p5");
+ check(vec_all_eq(p6, value), "p6");
+ check(p7 == 7, "p7");
+}
+
+void test()
+{
+ initn_c (3, 4, 5, 6, 1, value, 7);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/bug-3.c b/gcc/testsuite/gcc.dg/vmx/bug-3.c
new file mode 100644
index 0000000000..aacca26676
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/bug-3.c
@@ -0,0 +1,45 @@
+#include "harness.h"
+
+static vector unsigned int value = { 9, 9, 9, 9 };
+
+void varargsC2 (int p1, int p2, int p3, int p4, int p5, int p6, int p7,
+ int p8, vector unsigned int p9, int p10)
+{
+ int i1;
+ int i2;
+ int i3;
+ int i4;
+ int i5;
+ int i6;
+ int i7;
+ int i8;
+ vector unsigned int i9;
+ int i10;
+
+ i1 = p1;
+ i2 = p2;
+ i3 = p3;
+ i4 = p4;
+ i5 = p5;
+ i6 = p6;
+ i7 = p7;
+ i8 = p8;
+ i9 = p9;
+ i10 = p10;
+
+ check(i1 == 1, "i1");
+ check(i2 == 2, "i2");
+ check(i3 == 3, "i3");
+ check(i4 == 4, "i4");
+ check(i5 == 5, "i5");
+ check(i6 == 6, "i6");
+ check(i7 == 7, "i7");
+ check(i8 == 8, "i8");
+ check(vec_all_eq(i9, value), "i9");
+ check(i10 == 10, "i10");
+}
+
+void test()
+{
+ varargsC2 (1, 2, 3, 4, 5, 6, 7, 8, ((vector unsigned int){9,9,9,9}), 10);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/cw-bug-1.c b/gcc/testsuite/gcc.dg/vmx/cw-bug-1.c
new file mode 100644
index 0000000000..0c3028b512
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/cw-bug-1.c
@@ -0,0 +1,12 @@
+#include <altivec.h>
+#include <stdlib.h>
+
+#define ZERO (((vector unsigned char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}))
+
+int main(void)
+{
+ vector unsigned char a = ZERO;
+ if (vec_any_ne(a, ZERO))
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/cw-bug-3.c b/gcc/testsuite/gcc.dg/vmx/cw-bug-3.c
new file mode 100644
index 0000000000..511a7e0ae9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/cw-bug-3.c
@@ -0,0 +1,5 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector signed short hamming_window[1] = {
+ {2621, 2623, 2629, 2638, 2651, 2668, 2689, 2713}
+};
diff --git a/gcc/testsuite/gcc.dg/vmx/dct.c b/gcc/testsuite/gcc.dg/vmx/dct.c
new file mode 100644
index 0000000000..00c4cd93c7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/dct.c
@@ -0,0 +1,176 @@
+/* { dg-do compile } */
+#include <altivec.h>
+
+inline void
+transpose_vmx (vector signed short *input, vector signed short *output)
+{
+ vector signed short v0, v1, v2, v3, v4, v5, v6, v7;
+ vector signed short x0, x1, x2, x3, x4, x5, x6, x7;
+
+ /* Matrix transpose */
+ v0 = vec_mergeh (input[0], input[4]);
+ v1 = vec_mergel (input[0], input[4]);
+ v2 = vec_mergeh (input[1], input[5]);
+ v3 = vec_mergel (input[1], input[5]);
+ v4 = vec_mergeh (input[2], input[6]);
+ v5 = vec_mergel (input[2], input[6]);
+ v6 = vec_mergeh (input[3], input[7]);
+ v7 = vec_mergel (input[3], input[7]);
+
+ x0 = vec_mergeh (v0, v4);
+ x1 = vec_mergel (v0, v4);
+ x2 = vec_mergeh (v1, v5);
+ x3 = vec_mergel (v1, v5);
+ x4 = vec_mergeh (v2, v6);
+ x5 = vec_mergel (v2, v6);
+ x6 = vec_mergeh (v3, v7);
+ x7 = vec_mergel (v3, v7);
+
+ output[0] = vec_mergeh (x0, x4);
+ output[1] = vec_mergel (x0, x4);
+ output[2] = vec_mergeh (x1, x5);
+ output[3] = vec_mergel (x1, x5);
+ output[4] = vec_mergeh (x2, x6);
+ output[5] = vec_mergel (x2, x6);
+ output[6] = vec_mergeh (x3, x7);
+ output[7] = vec_mergel (x3, x7);
+}
+
+void
+dct_vmx (vector signed short *input, vector signed short *output,
+ vector signed short *postscale)
+{
+ vector signed short mul0, mul1, mul2, mul3, mul4, mul5, mul6, mul;
+ vector signed short v0, v1, v2, v3, v4, v5, v6, v7, v8, v9;
+ vector signed short v20, v21, v22, v23, v24, v25, v26, v27, v31;
+ int i;
+ vector signed short in[8], out[8];
+
+ /* Load first eight rows of input data */
+
+ /* Load multiplication constants */
+
+ /* Splat multiplication constants */
+ mul0 = vec_splat(input[8],0);
+ mul1 = vec_splat(input[8],1);
+ mul2 = vec_splat(input[8],2);
+ mul3 = vec_splat(input[8],3);
+ mul4 = vec_splat(input[8],4);
+ mul5 = vec_splat(input[8],5);
+ mul6 = vec_splat(input[8],6);
+
+ /* Perform DCT on the eight columns */
+
+ /*********** Stage 1 ***********/
+
+ v8 = vec_adds (input[0], input[7]);
+ v9 = vec_subs (input[0], input[7]);
+ v0 = vec_adds (input[1], input[6]);
+ v7 = vec_subs (input[1], input[6]);
+ v1 = vec_adds (input[2], input[5]);
+ v6 = vec_subs (input[2], input[5]);
+ v2 = vec_adds (input[3], input[4]);
+ v5 = vec_subs (input[3], input[4]);
+
+ /*********** Stage 2 ***********/
+
+ /* Top */
+ v3 = vec_adds (v8, v2); /* (V0+V7) + (V3+V4) */
+ v4 = vec_subs (v8, v2); /* (V0+V7) - (V3+V4) */
+ v2 = vec_adds (v0, v1); /* (V1+V6) + (V2+V5) */
+ v8 = vec_subs (v0, v1); /* (V1+V6) - (V2+V5) */
+
+ /* Bottom */
+ v0 = vec_subs (v7, v6); /* (V1-V6) - (V2-V5) */
+ v1 = vec_adds (v7, v6); /* (V1-V6) + (V2-V5) */
+
+ /*********** Stage 3 ***********/
+
+ /* Top */
+ in[0] = vec_adds (v3, v2); /* y0 = v3 + v2 */
+ in[4] = vec_subs (v3, v2); /* y4 = v3 - v2 */
+ in[2] = vec_mradds (v8, mul2, v4); /* y2 = v8 * a0 + v4 */
+ v6 = vec_mradds (v4, mul2, mul6);
+ in[6] = vec_subs (v6, v8); /* y6 = v4 * a0 - v8 */
+
+ /* Bottom */
+ v6 = vec_mradds (v0, mul0, v5); /* v6 = v0 * (c4) + v5 */
+ v7 = vec_mradds (v0, mul4, v5); /* v7 = v0 * (-c4) + v5 */
+ v2 = vec_mradds (v1, mul4, v9); /* v2 = v1 * (-c4) + v9 */
+ v3 = vec_mradds (v1, mul0, v9); /* v3 = v1 * (c4) + v9 */
+
+ /*********** Stage 4 ***********/
+
+ /* Bottom */
+ in[1] = vec_mradds (v6, mul3, v3); /* y1 = v6 * (a1) + v3 */
+ v23 = vec_mradds (v3, mul3, mul6);
+ in[7] = vec_subs (v23, v6); /* y7 = v3 * (a1) - v6 */
+ in[5] = vec_mradds (v2, mul1, v7); /* y5 = v2 * (a2) + v7 */
+ in[3] = vec_mradds (v7, mul5, v2); /* y3 = v7 * (-a2) + v2 */
+
+ transpose_vmx (in, out);
+
+ /* Perform DCT on the eight rows */
+
+ /*********** Stage 1 ***********/
+
+ v8 = vec_adds (out[0], out[7]);
+ v9 = vec_subs (out[0], out[7]);
+ v0 = vec_adds (out[1], out[6]);
+ v7 = vec_subs (out[1], out[6]);
+ v1 = vec_adds (out[2], out[5]);
+ v6 = vec_subs (out[2], out[5]);
+ v2 = vec_adds (out[3], out[4]);
+ v5 = vec_subs (out[3], out[4]);
+
+ /*********** Stage 2 ***********/
+
+ /* Top */
+ v3 = vec_adds (v8, v2); /* (V0+V7) + (V3+V4) */
+ v4 = vec_subs (v8, v2); /* (V0+V7) - (V3+V4) */
+ v2 = vec_adds (v0, v1); /* (V1+V6) + (V2+V5) */
+ v8 = vec_subs (v0, v1); /* (V1+V6) - (V2+V5) */
+
+ /* Bottom */
+ v0 = vec_subs (v7, v6); /* (V1-V6) - (V2-V5) */
+ v1 = vec_adds (v7, v6); /* (V1-V6) + (V2-V5) */
+
+ /*********** Stage 3 ***********/
+
+ /* Top */
+ v25 = vec_subs (v25, v25); /* reinit v25 = 0 */
+
+ v20 = vec_adds (v3, v2); /* y0 = v3 + v2 */
+ v24 = vec_subs (v3, v2); /* y4 = v3 - v2 */
+ v22 = vec_mradds (v8, mul2, v4); /* y2 = v8 * a0 + v4 */
+ v6 = vec_mradds (v4, mul2, v25);
+ v26 = vec_subs (v6, v8); /* y6 = v4 * a0 - v8 */
+
+ /* Bottom */
+ v6 = vec_mradds (v0, mul0, v5); /* v6 = v0 * (c4) + v5 */
+ v7 = vec_mradds (v0, mul4, v5); /* v7 = v0 * (-c4) + v5 */
+ v2 = vec_mradds (v1, mul4, v9); /* v2 = v1 * (-c4) + v9 */
+ v3 = vec_mradds (v1, mul0, v9); /* v3 = v1 * (c4) + v9 */
+
+ /*********** Stage 4 ***********/
+
+ /* Bottom */
+ v21 = vec_mradds (v6, mul3, v3); /* y1 = v6 * (a1) + v3 */
+ v23 = vec_mradds (v3, mul3, v25);
+ v27 = vec_subs (v23, v6); /* y7 = v3 * (a1) - v6 */
+ v25 = vec_mradds (v2, mul1, v7); /* y5 = v2 * (a2) + v7 */
+ v23 = vec_mradds (v7, mul5, v2); /* y3 = v7 * (-a2) + v2 */
+
+ /* Post-scale and store results */
+
+ v31 = vec_subs (v31, v31); /* reinit v31 = 0 */
+
+ output[0] = vec_mradds (postscale[0], v20, v31);
+ output[2] = vec_mradds (postscale[2], v22, v31);
+ output[4] = vec_mradds (postscale[4], v24, v31);
+ output[6] = vec_mradds (postscale[6], v26, v31);
+ output[1] = vec_mradds (postscale[1], v21, v31);
+ output[3] = vec_mradds (postscale[3], v23, v31);
+ output[5] = vec_mradds (postscale[5], v25, v31);
+ output[7] = vec_mradds (postscale[7], v27, v31);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/debug-1.c b/gcc/testsuite/gcc.dg/vmx/debug-1.c
new file mode 100644
index 0000000000..c92435e09c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/debug-1.c
@@ -0,0 +1,26 @@
+#include <altivec.h>
+vector unsigned char v;
+typedef unsigned char T[16];
+T t;
+typedef struct { unsigned char a[16]; } R;
+R r;
+typedef union {
+ unsigned char u8[16];
+ signed char s8[16];
+ unsigned short u16[8];
+ signed short s16[8];
+ unsigned int u32[4];
+ signed int s32[4];
+ float f32[4];
+} U;
+U u;
+static void use(void *p) {
+}
+int main() {
+ use (&v);
+ use (&t);
+ use (&r);
+ use (&u);
+ return 0;
+}
+
diff --git a/gcc/testsuite/gcc.dg/vmx/debug-2.c b/gcc/testsuite/gcc.dg/vmx/debug-2.c
new file mode 100644
index 0000000000..60380bcfce
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/debug-2.c
@@ -0,0 +1,42 @@
+#include <altivec.h>
+vector unsigned char u8 = {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10,11,12,13,14,15,16};
+vector signed char s8 = {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10,11,12,13,14,15,16};
+vector bool char b8 = {0, -1, 0, -1, 0, 0, 0, 0,
+ -1, -1, -1, -1, 0, -1, 0, -1};
+vector unsigned short u16 = {1, 2, 3, 4, 5, 6, 7, 8};
+vector signed short s16 = {1, 2, 3, 4, 5, 6, 7, 8};
+vector bool short b16 = {-1, 0, -1, 0, -1, -1, 0, 0};
+vector unsigned int u32 = {1, 2, 3, 4};
+vector signed int s32 = {1, 2, 3, 4};
+vector bool int b32 = {0, -1, -1, 0};
+vector float f32 = {1, 2, 3, 4};
+vector pixel p16 = {1, 2, 3, 4, 5, 6, 7, 8};
+
+static void f_u8(vector unsigned char *p) {}
+static void f_s8(vector signed char *p) {}
+static void f_b8(vector bool char *p) {}
+static void f_u16(vector unsigned short *p) {}
+static void f_s16(vector signed short *p) {}
+static void f_b16(vector bool short *p) {}
+static void f_u32(vector unsigned int *p) {}
+static void f_s32(vector signed int *p) {}
+static void f_b32(vector bool int *p) {}
+static void f_f32(vector float *p) {}
+static void f_p16(vector pixel *p) {}
+
+int main() {
+ f_u8(&u8);
+ f_s8(&s8);
+ f_b8(&b8);
+ f_u16(&u16);
+ f_s16(&s16);
+ f_b16(&b16);
+ f_u32(&u32);
+ f_s32(&s32);
+ f_b32(&b32);
+ f_f32(&f32);
+ f_p16(&p16);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/debug-3.c b/gcc/testsuite/gcc.dg/vmx/debug-3.c
new file mode 100644
index 0000000000..a9fc866299
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/debug-3.c
@@ -0,0 +1,75 @@
+#include <altivec.h>
+vector unsigned char u8;
+vector signed char s8;
+vector bool char b8;
+vector unsigned short u16;
+vector signed short s16;
+vector bool short b16;
+vector unsigned int u32;
+vector signed int s32;
+vector bool int b32;
+vector float f32;
+vector pixel p16;
+
+void f_u8(vector unsigned char *p) {
+ u8 = *p;
+}
+void f_s8(vector signed char *p) {
+ s8 = *p;
+}
+void f_b8(vector bool char *p) {
+ b8 = *p;
+}
+void f_u16(vector unsigned short *p) {
+ u16 = *p;
+}
+void f_s16(vector signed short *p) {
+ s16 = *p;
+}
+void f_b16(vector bool short *p) {
+ b16 = *p;
+}
+void f_u32(vector unsigned int *p) {
+ u32 = *p;
+}
+void f_s32(vector signed int *p) {
+ s32 = *p;
+}
+void f_b32(vector bool int *p) {
+ b32 = *p;
+}
+void f_f32(vector float *p) {
+ f32 = *p;
+}
+void f_p16(vector pixel *p) {
+ p16 = *p;
+}
+
+int main() {
+ vector unsigned char u8 = ((vector unsigned char){1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10,11,12,13,14,15,16});
+ vector signed char s8 = ((vector signed char){1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10,11,12,13,14,15,16});
+ vector bool char b8 = ((vector bool char){0, -1, 0, -1, 0, 0, 0, 0,
+ -1, -1, -1, -1, 0, -1, 0, -1});
+ vector unsigned short u16 = ((vector unsigned short){1, 2, 3, 4, 5, 6, 7, 8});
+ vector signed short s16 = ((vector signed short){1, 2, 3, 4, 5, 6, 7, 8});
+ vector bool short b16 = ((vector bool short){-1, 0, -1, 0, -1, -1, 0, 0});
+ vector unsigned int u32 = ((vector unsigned int){1, 2, 3, 4});
+ vector signed int s32 = ((vector signed int){1, 2, 3, 4});
+ vector bool int b32 = ((vector bool int){0, -1, -1, 0});
+ vector float f32 = ((vector float){1, 2, 3, 4});
+ vector pixel p16 = ((vector pixel){1, 2, 3, 4, 5, 6, 7, 8});
+ f_u8(&u8);
+ f_s8(&s8);
+ f_b8(&b8);
+ f_u16(&u16);
+ f_s16(&s16);
+ f_b16(&b16);
+ f_u32(&u32);
+ f_s32(&s32);
+ f_b32(&b32);
+ f_f32(&f32);
+ f_p16(&p16);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/debug-4.c b/gcc/testsuite/gcc.dg/vmx/debug-4.c
new file mode 100644
index 0000000000..e30ba2674f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/debug-4.c
@@ -0,0 +1,78 @@
+#include <altivec.h>
+vector unsigned char u8;
+vector signed char s8;
+vector bool char b8;
+vector unsigned short u16;
+vector signed short s16;
+vector bool short b16;
+vector unsigned int u32;
+vector signed int s32;
+vector bool int b32;
+vector float f32;
+vector pixel p16;
+
+void f_u8(vector unsigned char *p) {
+ u8 = vec_add(*p, *p);
+}
+void f_s8(vector signed char *p) {
+ s8 = vec_add(*p, *p);
+}
+void f_b8(vector bool char *p) {
+ b8 = vec_cmpgt(s8, s8);
+ b8 = vec_xor(b8, *p);
+}
+void f_u16(vector unsigned short *p) {
+ u16 = vec_add(*p, *p);
+}
+void f_s16(vector signed short *p) {
+ s16 = vec_add(*p, *p);
+}
+void f_b16(vector bool short *p) {
+ b16 = vec_cmpgt(s16, s16);
+ b16 = vec_xor(b16, *p);
+}
+void f_u32(vector unsigned int *p) {
+ u32 = vec_add(*p, *p);
+}
+void f_s32(vector signed int *p) {
+ s32 = vec_add(*p, *p);
+}
+void f_b32(vector bool int *p) {
+ b32 = vec_cmpgt(s32, s32);
+ b32 = vec_xor(b32, *p);
+}
+void f_f32(vector float *p) {
+ f32 = vec_add(*p, *p);
+}
+void f_p16(vector pixel *p) {
+ p16 = *p;
+}
+
+int main() {
+ vector unsigned char u8 = ((vector unsigned char){1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10,11,12,13,14,15,16});
+ vector signed char s8 = ((vector signed char){1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10,11,12,13,14,15,16});
+ vector bool char b8 = ((vector bool char){0, -1, 0, -1, 0, 0, 0, 0,
+ -1, -1, -1, -1, 0, -1, 0, -1});
+ vector unsigned short u16 = ((vector unsigned short){1, 2, 3, 4, 5, 6, 7, 8});
+ vector signed short s16 = ((vector signed short){1, 2, 3, 4, 5, 6, 7, 8});
+ vector bool short b16 = ((vector bool short){-1, 0, -1, 0, -1, -1, 0, 0});
+ vector unsigned int u32 = ((vector unsigned int){1, 2, 3, 4});
+ vector signed int s32 = ((vector signed int){1, 2, 3, 4});
+ vector bool int b32 = ((vector bool int){0, -1, -1, 0});
+ vector float f32 = ((vector float){1, 2, 3, 4});
+ vector pixel p16 = ((vector pixel){1, 2, 3, 4, 5, 6, 7, 8});
+ f_u8(&u8);
+ f_s8(&s8);
+ f_b8(&b8);
+ f_u16(&u16);
+ f_s16(&s16);
+ f_b16(&b16);
+ f_u32(&u32);
+ f_s32(&s32);
+ f_b32(&b32);
+ f_f32(&f32);
+ f_p16(&p16);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/dos-bug-1-gdb.c b/gcc/testsuite/gcc.dg/vmx/dos-bug-1-gdb.c
new file mode 100644
index 0000000000..dc4c4d3d9b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/dos-bug-1-gdb.c
@@ -0,0 +1,7 @@
+#include <altivec.h>
+static vector unsigned int v = {0x01020304,0x05060708,0x21324354,0x65768798};
+static vector unsigned int f() { return v; }
+int main() {
+ vector unsigned int x = f();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/dos-bug-2-gdb.c b/gcc/testsuite/gcc.dg/vmx/dos-bug-2-gdb.c
new file mode 100644
index 0000000000..4ec47b12d2
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/dos-bug-2-gdb.c
@@ -0,0 +1,7 @@
+#include <altivec.h>
+static vector unsigned int v = {0x01020304,0x05060708,0x21324354,0x65768798};
+static vector unsigned int f() { return vec_splat(v,0); }
+int main() {
+ vector unsigned int x = f();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/eg-5.c b/gcc/testsuite/gcc.dg/vmx/eg-5.c
new file mode 100644
index 0000000000..0b37e69d19
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/eg-5.c
@@ -0,0 +1,27 @@
+#include "harness.h"
+
+static vector float
+matvecmul4 (vector float c0, vector float c1, vector float c2,
+ vector float c3, vector float v)
+{
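+  /* c0..c3 hold the columns of a 4x4 matrix; the product M*v is
+     accumulated one column at a time as v[i] * ci with vec_madd.  */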
+ /* Set result to a vector of f32 0's */
+ vector float result = ((vector float){0.,0.,0.,0.});
+
+ result = vec_madd (c0, vec_splat (v, 0), result);
+ result = vec_madd (c1, vec_splat (v, 1), result);
+ result = vec_madd (c2, vec_splat (v, 2), result);
+ result = vec_madd (c3, vec_splat (v, 3), result);
+
+ return result;
+}
+
+static void test()
+{
+ check(vec_all_eq(matvecmul4(((vector float){2,3,5,7}),
+ ((vector float){11,13,17,19}),
+ ((vector float){23,29,31,37}),
+ ((vector float){41,43,47,53}),
+ ((vector float){59,61,67,71})),
+ ((vector float){5241, 5966, 6746, 7814})),
+ "matvecmul4");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/fft.c b/gcc/testsuite/gcc.dg/vmx/fft.c
new file mode 100644
index 0000000000..2b8a537c66
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/fft.c
@@ -0,0 +1,99 @@
+/* { dg-do compile } */
+#include <altivec.h>
+
+inline void
+transpose4x4(vector float *matrix)
+{
+ vector float v0, v1, v2, v3;
+
+ v0 = vec_mergeh(matrix[0], matrix[2]);
+ v1 = vec_mergel(matrix[0], matrix[2]);
+ v2 = vec_mergeh(matrix[1], matrix[3]);
+ v3 = vec_mergel(matrix[1], matrix[3]);
+
+ matrix[0] = vec_mergeh(v0, v2);
+ matrix[1] = vec_mergel(v0, v2);
+ matrix[2] = vec_mergeh(v1, v3);
+ matrix[3] = vec_mergel(v1, v3);
+}
+
+void
+vec_ifft64(vector float *x0, vector float *x1)
+{
+ int i;
+ vector float real[4], imag[4];
+ vector float c0r, c1r, c2r, c3r, c0i, c1i, c2i, c3i;
+ vector float d0r, d1r, d2r, d3r, d0i, d1i, d2i, d3i;
+
+ /*
+ * N=64
+ *
+ * Stage 1: t=1 => k = 0, j = 0..15
+ * ================================
+ * for j = 0:15
+ * c0 = x0(j+0*16);
+ * c1 = x0(j+1*16);
+ * c2 = x0(j+2*16);
+ * c3 = x0(j+3*16);
+ *
+ * d0 = c0 + c2;
+ * d1 = c0 - c2;
+ * d2 = c1 + c3;
+ * d3 = i*(c1 - c3);
+ *
+ * x1(4j+0) = d0 + d2;
+ * x1(4j+1) = d1 + d3;
+ * x1(4j+2) = d0 - d2;
+ * x1(4j+3) = d1 - d3;
+ * end
+ ******************************************************/
+
+ for (i=0; i < 4; i++)
+ {
+ c0r = x0[i];
+ c1r = x0[i+4];
+ c2r = x0[i+8];
+ c3r = x0[i+12];
+
+ c0i = x0[i+16];
+ c1i = x0[i+20];
+ c2i = x0[i+24];
+ c3i = x0[i+28];
+
+ d0r = vec_add(c0r, c2r);
+ d1r = vec_sub(c0r, c2r);
+ d2r = vec_add(c1r, c3r);
+ d3r = vec_sub(c3i, c1i);
+
+ d0i = vec_add(c0i, c2i);
+ d1i = vec_sub(c0i, c2i);
+ d2i = vec_add(c1i, c3i);
+ d3i = vec_sub(c1r, c3r);
+
+ /* Calculate real{x1} */
+ real[0] = vec_add(d0r, d2r);
+ real[1] = vec_add(d1r, d3r);
+ real[2] = vec_sub(d0r, d2r);
+ real[3] = vec_sub(d1r, d3r);
+
+ transpose4x4(real);
+
+ /* Calculate imag{x1} */
+ imag[0] = vec_add(d0i, d2i);
+ imag[1] = vec_add(d1i, d3i);
+ imag[2] = vec_sub(d0i, d2i);
+ imag[3] = vec_sub(d1i, d3i);
+
+ transpose4x4(imag);
+
+ x1[4*i] = real[0];
+ x1[4*i+1] = real[1];
+ x1[4*i+2] = real[2];
+ x1[4*i+3] = real[3];
+
+ x1[4*i+16] = imag[0];
+ x1[4*i+17] = imag[1];
+ x1[4*i+18] = imag[2];
+ x1[4*i+19] = imag[3];
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-1.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-1.c
new file mode 100644
index 0000000000..684ffe90eb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-1.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+#include <altivec.h>
+typedef struct n_a {
+ signed char m1;
+ vector float m2;
+} n_a;
+
+typedef struct n_b {
+ signed char m1;
+ struct n_a m2;
+} n_b;
+
+extern void f(n_b *);
+
+void initn_b(signed char p1, struct n_a p2)
+{
+ n_b _i;
+ ((_i).m1 = p1, (_i).m2 = p2);
+ f(&_i);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-2.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-2.c
new file mode 100644
index 0000000000..f2f3c4cfe0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-2.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+#include <altivec.h>
+typedef struct n_a {
+ signed char m1;
+ vector float m2;
+} n_a;
+
+typedef struct n_b {
+ signed char m1;
+ struct n_a m2;
+} n_b;
+
+extern void f(n_b *);
+
+void initn_b()
+{
+ n_b _i;
+ f(&_i);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-3.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-3.c
new file mode 100644
index 0000000000..77b2a002f3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-3.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+#include <altivec.h>
+void valuen014(vector float p1, vector float p2, vector float p3,
+ vector float p4, vector float p5, vector float p6,
+ vector float p7, vector float p8, vector float p9,
+ vector float p10, vector float p11, vector float p12,
+ int p13)
+{
+}
+
+void f()
+{
+ valuen014(((vector float) {1.83e+09, 5.73e+08, -2.96e+08, -7.46e+08}),
+ ((vector float) {-2.01e+09, 9.89e+08, -1.92e+09, 2.09e+09}),
+ ((vector float) {1.95e+09, -2.41e+08, 2.67e+08, 1.67e+09}),
+ ((vector float) {-2.12e+09, 8.18e+08, 9.47e+08, -1.25e+09}),
+ ((vector float) {-9.47e+08, -9.3e+08, -1.65e+09, 1.64e+09}),
+ ((vector float) {-7.99e+07, 4.86e+08, -3.4e+06, 3.11e+08}),
+ ((vector float) {1.78e+09, 1.22e+09, -1.27e+09, -3.11e+08}),
+ ((vector float) {1.41e+09, -5.38e+07, -2.08e+09, 1.54e+09}),
+ ((vector float) {3.1e+08, -1.49e+09, 5.38e+08, -1.3e+09}),
+ ((vector float) {9.66e+08, 5.5e+08, 1.75e+08, -8.22e+07}),
+ ((vector float) {-1.72e+08, -2.06e+09, 1.14e+09, -4.64e+08}),
+ ((vector float) {-1.25e+09, 8.12e+07, -2.02e+09, 4.71e+08}),
+ 962425441);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-4.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-4.c
new file mode 100644
index 0000000000..beb920f2ce
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-4.c
@@ -0,0 +1,5 @@
+/* { dg-do compile } */
+#include <altivec.h>
+extern vector unsigned int gn00111;
+long f() { return (long)&gn00111; }
+
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-5.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-5.c
new file mode 100644
index 0000000000..0eaa6adb7b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-5.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+#include <altivec.h>
+void valuen014(vector float p1, vector float p2, vector float p3,
+ vector float p4, vector float p5, vector float p6,
+ vector float p7, vector float p8, vector float p9,
+ vector float p10, vector float p11, vector float p12,
+ vector float px)
+{
+}
+
+void f()
+{
+ valuen014(((vector float) {1.83e+09, 5.73e+08, -2.96e+08, -7.46e+08}),
+ ((vector float) {-2.01e+09, 9.89e+08, -1.92e+09, 2.09e+09}),
+ ((vector float) {1.95e+09, -2.41e+08, 2.67e+08, 1.67e+09}),
+ ((vector float) {-2.12e+09, 8.18e+08, 9.47e+08, -1.25e+09}),
+ ((vector float) {-9.47e+08, -9.3e+08, -1.65e+09, 1.64e+09}),
+ ((vector float) {-7.99e+07, 4.86e+08, -3.4e+06, 3.11e+08}),
+ ((vector float) {1.78e+09, 1.22e+09, -1.27e+09, -3.11e+08}),
+ ((vector float) {1.41e+09, -5.38e+07, -2.08e+09, 1.54e+09}),
+ ((vector float) {3.1e+08, -1.49e+09, 5.38e+08, -1.3e+09}),
+ ((vector float) {9.66e+08, 5.5e+08, 1.75e+08, -8.22e+07}),
+ ((vector float) {-1.72e+08, -2.06e+09, 1.14e+09, -4.64e+08}),
+ ((vector float) {-1.25e+09, 8.12e+07, -2.02e+09, 4.71e+08}),
+ ((vector float){1,1,1,1}));
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-6.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-6.c
new file mode 100644
index 0000000000..f0a424a2bd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-6.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+#include <altivec.h>
+void valuen014(vector float p1, vector float p2, vector float p3,
+ vector float p4, vector float p5, vector float p6,
+ vector float p7, vector float p8, vector float p9,
+ vector float p10, vector float p11, vector float p12,
+ vector float px, vector float py, vector float pz,
+ int p13)
+{
+}
+
+void f()
+{
+ valuen014(((vector float) {1.83e+09, 5.73e+08, -2.96e+08, -7.46e+08}),
+ ((vector float) {-2.01e+09, 9.89e+08, -1.92e+09, 2.09e+09}),
+ ((vector float) {1.95e+09, -2.41e+08, 2.67e+08, 1.67e+09}),
+ ((vector float) {-2.12e+09, 8.18e+08, 9.47e+08, -1.25e+09}),
+ ((vector float) {-9.47e+08, -9.3e+08, -1.65e+09, 1.64e+09}),
+ ((vector float) {-7.99e+07, 4.86e+08, -3.4e+06, 3.11e+08}),
+ ((vector float) {1.78e+09, 1.22e+09, -1.27e+09, -3.11e+08}),
+ ((vector float) {1.41e+09, -5.38e+07, -2.08e+09, 1.54e+09}),
+ ((vector float) {3.1e+08, -1.49e+09, 5.38e+08, -1.3e+09}),
+ ((vector float) {9.66e+08, 5.5e+08, 1.75e+08, -8.22e+07}),
+ ((vector float) {-1.72e+08, -2.06e+09, 1.14e+09, -4.64e+08}),
+ ((vector float) {-1.25e+09, 8.12e+07, -2.02e+09, 4.71e+08}),
+ ((vector float){1,1,1,1}),
+ ((vector float){2,2,2,2}),
+ ((vector float){3,3,3,3}),
+ 962425441);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-7.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-7.c
new file mode 100644
index 0000000000..452977bbfb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-7.c
@@ -0,0 +1,35 @@
+/* { dg-do compile } */
+#include <altivec.h>
+extern void referencen001(int *p1, int *p2, int *p3, int *p4, int *p5, int *p6, int *p7, vector float *p8);
+
+extern int gn0011;
+
+extern int gn0012;
+
+extern int gn0013;
+
+extern int gn0014;
+
+extern int gn0015;
+
+extern int gn0016;
+
+extern int gn0017;
+
+extern vector float gn0018;
+
+void testn001(void)
+{
+ int a1;
+ int a2;
+ int a3;
+ int a4;
+ int a5;
+ int a6;
+ int a7;
+ vector float a8;
+
+ (a1 = -53786696, a2 = -1840132710, a3 = -2130504990, a4 = 1429848501, a5 = 1139248605, a6 = 428762253, a7 = -1581480596, a8 = ((vector float) {1.66e+09, -1.83e+09, -6.79e+08, 1.58e+09}));
+
+ referencen001(&a1, &a2, &a3, &a4, &a5, &a6, &a7, &a8);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-8.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-8.c
new file mode 100644
index 0000000000..33d4bd7e63
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-8.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+#include <altivec.h>
+
+typedef struct n001 {
+ signed char m1;
+ vector float m2;
+ int m3;
+ vector signed short m4;
+ signed char m5;
+ vector unsigned short m6;
+} n001;
+
+ n001 _i = {-4, {-1.84e+09, -2.13e+09, 1.43e+09, 1.14e+09}, 428762253, {-24132, 25298, -27969, -10358, 24164, -5157, -18143, -6509}, 40, {0x8737, 0xd7cf, 0xb6a7, 0x948f, 0x790b, 0x9255, 0x872d, 0xe72c}};
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-9.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-9.c
new file mode 100644
index 0000000000..b54338a3bc
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-9.c
@@ -0,0 +1,5 @@
+/* { dg-do compile } */
+#include <altivec.h>
+
+vector signed short _j = {-24132, 25298, -27969, -10358, 24164, -5157, -18143, -6509};
+
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-b.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-b.c
new file mode 100644
index 0000000000..e893ff1813
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-b.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector unsigned char u8a, u8b, u8c, u8d, *u8ptr;
+vector signed short s16a, s16b, s16c, s16d;
+vector unsigned short u16a, u16b, u16c, u16d;
+vector unsigned int u32a, u32b, u32c, u32d;
+vector float f32a, f32b, f32c, f32d, f32e;
+int i, j, *p;
+
+void test()
+{
+ u8c = vec_add(u8a, u8b);
+ f32c = vec_ceil(f32a);
+ f32d = vec_vcfux(u32a, 31U);
+ s16c = vec_splat_s16(-16);
+ u8d = vec_vsldoi(u8a, u8b, 15);
+ f32e = vec_vmaddfp(f32a, f32b, f32c);
+
+ vec_dss(3);
+ vec_dssall();
+ vec_mtvscr(u8a);
+ u16a = vec_mfvscr();
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-c.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-c.c
new file mode 100644
index 0000000000..33fed4bd9c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-c.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector signed short s16ss() { vector signed short a; return vec_subs(a,a); }
+vector signed short s16s() { vector signed short a; return vec_sub(a,a); }
+vector signed short s16x() { vector signed short a; return vec_xor(a,a); }
+vector signed short s16a() { vector signed short a; return vec_andc(a,a); }
+vector unsigned char u8;
+vector signed short s16;
+vector bool int b32;
+vector float f32;
+vector pixel p16;
+void x()
+{
+ u8 = ((vector unsigned char){3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3});
+ s16 = ((vector signed short){-7,-7,-7,-7,-7,-7,-7,-7});
+ b32 = ((vector bool int) {10,20,30,40});
+ f32 = ((vector float) {2,4,6,8});
+ p16 = ((vector pixel){23,23,23,23,23,23,23,23});
+}
+vector unsigned int a;
+vector unsigned int b;
+void f(void) {
+ a = ((vector unsigned int){1,1,1,1});
+ b = ((vector unsigned int){1,2,3,4});
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-d.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-d.c
new file mode 100644
index 0000000000..2b2151eec9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-d.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+#include <altivec.h>
+static void f() {}
+extern void g() {}
+extern vector unsigned char permute_128(vector unsigned char);
+
+void foo()
+{
+ vector unsigned char input
+ = {0,1,2,4,8,16,32,64,128,0,1,2,4,8,16,32};
+ vector unsigned char result = permute_128(input);
+ void (*p)() = f;
+ void (*q)() = g;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-e.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-e.c
new file mode 100644
index 0000000000..58e4fc3d31
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-e.c
@@ -0,0 +1,44 @@
+#include "harness.h"
+
+typedef struct n_a
+{
+ signed char m1;
+ short m2;
+ int m3;
+ double m4;
+ vector float m5;
+}
+n_a;
+
+static void
+initn_a(signed char p1, short p2, int p3, double p4, vector float p5)
+{
+ n_a i;
+ static struct
+ {
+ n_a b;
+ char a;
+ }
+ x;
+
+ i.m1 = p1;
+ i.m2 = p2;
+ i.m3 = p3;
+ i.m4 = p4;
+ i.m5 = p5;
+
+ check(i.m1 == -17, "i.m1");
+ check(i.m2 == 9165, "i.m2");
+ check(i.m3 == -1857760764, "i.m3");
+ check(i.m4 == 7.3e+18, "i.m4");
+ check(vec_all_eq(i.m5, ((vector float){-5.02e+08,
+ -4.34e+08,
+ -1.04e+09,
+ 1.42e+09})), "i.m5");
+}
+
+static void test()
+{
+ initn_a(-17, 9165, -1857760764, 7.3e+18,
+ ((vector float){-5.02e+08, -4.34e+08, -1.04e+09, 1.42e+09}));
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-f.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-f.c
new file mode 100644
index 0000000000..fb61bb15a7
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-f.c
@@ -0,0 +1,125 @@
+/* { dg-do compile } */
+#include <altivec.h>
+typedef short Word16;
+typedef int Word32;
+typedef int Flag;
+
+extern Flag Overflow;
+extern Flag Carry;
+
+extern vector signed short table[8];
+extern vector signed short slope_cos[8];
+
+void Lsf_lsp(
+ vector signed short lsfq[],
+ vector signed short lspq[]
+)
+{
+ vector signed short Q17_con = ((vector signed short){20861,20861,20861,20861,20861,20861,20861,20861});
+ vector unsigned char perm1 = ((vector unsigned char){0, 8, 1, 9, 2, 10, 3,
+ 11, 4, 12, 5, 13, 6, 14, 7, 15});
+ vector unsigned char PerIndex, tmpIndex;
+ vector signed short tmp0, tmp1, tmp2, tmp3;
+ vector signed short stmp0, stmp1, stmp2, stmp3;
+ vector signed short index0, index1, offset0, offset1;
+ vector signed short table0, table1, slope0, slope1;
+ vector unsigned short select;
+ vector signed int L_tmp0, L_tmp1, L_tmp2, L_tmp3;
+
+
+ tmp0 = vec_madds(lsfq[0], Q17_con, (((vector signed short){0,0,0,0,0,0,0,0})) );
+ tmp1 = vec_madds(lsfq[1], Q17_con, (((vector signed short){0,0,0,0,0,0,0,0})) );
+
+
+ offset0 = vec_and(tmp0, (((vector signed short){0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff})) );
+ offset1 = vec_and(tmp1, (((vector signed short){0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff})) );
+
+
+ index0 = vec_min(vec_sra(tmp0, (((vector unsigned short){8,8,8,8,8,8,8,8})) ), (((vector signed short){63,63,63,63,63,63,63,63})) );
+ index1 = vec_min(vec_sra(tmp1, (((vector unsigned short){8,8,8,8,8,8,8,8})) ), (((vector signed short){63,63,63,63,63,63,63,63})) );
+
+
+
+ tmp0 = vec_sl(index0, (vector unsigned short)((((vector signed short){1,1,1,1,1,1,1,1})) ));
+ PerIndex = (vector unsigned char)vec_packs(tmp0, vec_add(tmp0, (((vector signed short){1,1,1,1,1,1,1,1})) ));
+ PerIndex = vec_perm(PerIndex, PerIndex, perm1);
+
+
+ tmp0 = vec_perm(table[0], table[1], PerIndex);
+ stmp0 = vec_perm(slope_cos[0], slope_cos[1], PerIndex);
+
+ tmpIndex = vec_sub(PerIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp1 = vec_perm(table[2], table[3], tmpIndex);
+ stmp1 = vec_perm(slope_cos[2], slope_cos[3], tmpIndex);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31})) );
+ tmp2 = vec_sel(tmp0, tmp1, select);
+ stmp2 = vec_sel(stmp0, stmp1, select);
+
+ tmpIndex = vec_sub(tmpIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp0 = vec_perm(table[4], table[5], tmpIndex);
+ stmp0 = vec_perm(slope_cos[4], slope_cos[5], tmpIndex);
+
+ tmpIndex = vec_sub(tmpIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp1 = vec_perm(table[6], table[7], tmpIndex);
+ stmp1 = vec_perm(slope_cos[6], slope_cos[7], tmpIndex);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95})) );
+ tmp3 = vec_sel(tmp0, tmp1, select);
+ stmp3 = vec_sel(stmp0, stmp1, select);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63})) );
+ table0 = vec_sel(tmp2, tmp3, select);
+ slope0 = vec_sel(stmp2, stmp3, select);
+
+ tmp0 = vec_sl(index1, (vector unsigned short)((((vector signed short){1,1,1,1,1,1,1,1})) ));
+ PerIndex = (vector unsigned char)vec_packs(tmp0, vec_add(tmp0, (((vector signed short){1,1,1,1,1,1,1,1})) ));
+ PerIndex = vec_perm(PerIndex, PerIndex, perm1);
+
+
+ tmp0 = vec_perm(table[0], table[1], PerIndex);
+ stmp0 = vec_perm(slope_cos[0], slope_cos[1], PerIndex);
+
+ tmpIndex = vec_sub(PerIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp1 = vec_perm(table[2], table[3], tmpIndex);
+ stmp1 = vec_perm(slope_cos[2], slope_cos[3], tmpIndex);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31})) );
+ tmp2 = vec_sel(tmp0, tmp1, select);
+ stmp2 = vec_sel(stmp0, stmp1, select);
+
+ tmpIndex = vec_sub(tmpIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp0 = vec_perm(table[4], table[5], tmpIndex);
+ stmp0 = vec_perm(slope_cos[4], slope_cos[5], tmpIndex);
+
+ tmpIndex = vec_sub(tmpIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp1 = vec_perm(table[6], table[7], tmpIndex);
+ stmp1 = vec_perm(slope_cos[6], slope_cos[7], tmpIndex);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95})) );
+ tmp3 = vec_sel(tmp0, tmp1, select);
+ stmp3 = vec_sel(stmp0, stmp1, select);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63})) );
+ table1 = vec_sel(tmp2, tmp3, select);
+ slope1 = vec_sel(stmp2, stmp3, select);
+
+
+
+ L_tmp0 = vec_sra(vec_mule(slope0, offset0), (((vector unsigned int){12,12,12,12})) );
+ L_tmp1 = vec_sra(vec_mulo(slope0, offset0), (((vector unsigned int){12,12,12,12})) );
+ L_tmp2 = vec_sra(vec_mule(slope1, offset1), (((vector unsigned int){12,12,12,12})) );
+ L_tmp3 = vec_sra(vec_mulo(slope1, offset1), (((vector unsigned int){12,12,12,12})) );
+
+
+ tmp0 = vec_packs(L_tmp0, L_tmp2);
+ tmp1 = vec_packs(L_tmp1, L_tmp3);
+ tmp2 = vec_mergeh(tmp0, tmp1);
+ tmp3 = vec_mergel(tmp0, tmp1);
+
+
+ lspq[0] = vec_adds(table0, tmp2);
+ lspq[1] = vec_adds(table1, tmp3);
+
+ return;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-g.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-g.c
new file mode 100644
index 0000000000..573a73b356
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-g.c
@@ -0,0 +1,119 @@
+/* { dg-do compile } */
+#include <altivec.h>
+extern vector signed short table[8];
+extern vector signed short slope_cos[8];
+extern vector signed short slope_acos[8];
+
+void Lsf_lsp(
+ vector signed short lsfq[],
+ vector signed short lspq[]
+)
+{
+ vector signed short Q17_con = ((vector signed short){20861,20861,20861,20861,20861,20861,20861,20861});
+ vector unsigned char perm1 = ((vector unsigned char){0, 8, 1, 9, 2, 10, 3,
+ 11, 4, 12, 5, 13, 6, 14, 7, 15});
+ vector unsigned char PerIndex, tmpIndex;
+ vector signed short tmp0, tmp1, tmp2, tmp3;
+ vector signed short stmp0, stmp1, stmp2, stmp3;
+ vector signed short index0, index1, offset0, offset1;
+ vector signed short table0, table1, slope0, slope1;
+ vector unsigned short select;
+ vector signed int L_tmp0, L_tmp1, L_tmp2, L_tmp3;
+
+
+ tmp0 = vec_madds(lsfq[0], Q17_con, (((vector signed short){0,0,0,0,0,0,0,0})) );
+ tmp1 = vec_madds(lsfq[1], Q17_con, (((vector signed short){0,0,0,0,0,0,0,0})) );
+
+
+ offset0 = vec_and(tmp0, (((vector signed short){0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff})) );
+ offset1 = vec_and(tmp1, (((vector signed short){0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff,0x00ff})) );
+
+
+ index0 = vec_min(vec_sra(tmp0, (((vector unsigned short){8,8,8,8,8,8,8,8})) ), (((vector signed short){63,63,63,63,63,63,63,63})) );
+ index1 = vec_min(vec_sra(tmp1, (((vector unsigned short){8,8,8,8,8,8,8,8})) ), (((vector signed short){63,63,63,63,63,63,63,63})) );
+
+
+
+ tmp0 = vec_sl(index0, (vector unsigned short)((((vector signed short){1,1,1,1,1,1,1,1})) ));
+ PerIndex = (vector unsigned char)vec_packs(tmp0, vec_add(tmp0, (((vector signed short){1,1,1,1,1,1,1,1})) ));
+ PerIndex = vec_perm(PerIndex, PerIndex, perm1);
+
+
+ tmp0 = vec_perm(table[0], table[1], PerIndex);
+ stmp0 = vec_perm(slope_cos[0], slope_cos[1], PerIndex);
+
+ tmpIndex = vec_sub(PerIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp1 = vec_perm(table[2], table[3], tmpIndex);
+ stmp1 = vec_perm(slope_cos[2], slope_cos[3], tmpIndex);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31})) );
+ tmp2 = vec_sel(tmp0, tmp1, select);
+ stmp2 = vec_sel(stmp0, stmp1, select);
+
+ tmpIndex = vec_sub(tmpIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp0 = vec_perm(table[4], table[5], tmpIndex);
+ stmp0 = vec_perm(slope_cos[4], slope_cos[5], tmpIndex);
+
+ tmpIndex = vec_sub(tmpIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp1 = vec_perm(table[6], table[7], tmpIndex);
+ stmp1 = vec_perm(slope_cos[6], slope_cos[7], tmpIndex);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95})) );
+ tmp3 = vec_sel(tmp0, tmp1, select);
+ stmp3 = vec_sel(stmp0, stmp1, select);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63})) );
+ table0 = vec_sel(tmp2, tmp3, select);
+ slope0 = vec_sel(stmp2, stmp3, select);
+
+ tmp0 = vec_sl(index1, (vector unsigned short)((((vector signed short){1,1,1,1,1,1,1,1})) ));
+ PerIndex = (vector unsigned char)vec_packs(tmp0, vec_add(tmp0, (((vector signed short){1,1,1,1,1,1,1,1})) ));
+ PerIndex = vec_perm(PerIndex, PerIndex, perm1);
+
+
+ tmp0 = vec_perm(table[0], table[1], PerIndex);
+ stmp0 = vec_perm(slope_cos[0], slope_cos[1], PerIndex);
+
+ tmpIndex = vec_sub(PerIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp1 = vec_perm(table[2], table[3], tmpIndex);
+ stmp1 = vec_perm(slope_cos[2], slope_cos[3], tmpIndex);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31})) );
+ tmp2 = vec_sel(tmp0, tmp1, select);
+ stmp2 = vec_sel(stmp0, stmp1, select);
+
+ tmpIndex = vec_sub(tmpIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp0 = vec_perm(table[4], table[5], tmpIndex);
+ stmp0 = vec_perm(slope_cos[4], slope_cos[5], tmpIndex);
+
+ tmpIndex = vec_sub(tmpIndex, (((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32})) );
+ tmp1 = vec_perm(table[6], table[7], tmpIndex);
+ stmp1 = vec_perm(slope_cos[6], slope_cos[7], tmpIndex);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95})) );
+ tmp3 = vec_sel(tmp0, tmp1, select);
+ stmp3 = vec_sel(stmp0, stmp1, select);
+
+ select = (vector unsigned short)vec_cmpgt(PerIndex, (((vector unsigned char){63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63})) );
+ table1 = vec_sel(tmp2, tmp3, select);
+ slope1 = vec_sel(stmp2, stmp3, select);
+
+
+
+ L_tmp0 = vec_sra(vec_mule(slope0, offset0), (((vector unsigned int){12,12,12,12})) );
+ L_tmp1 = vec_sra(vec_mulo(slope0, offset0), (((vector unsigned int){12,12,12,12})) );
+ L_tmp2 = vec_sra(vec_mule(slope1, offset1), (((vector unsigned int){12,12,12,12})) );
+ L_tmp3 = vec_sra(vec_mulo(slope1, offset1), (((vector unsigned int){12,12,12,12})) );
+
+
+ tmp0 = vec_packs(L_tmp0, L_tmp2);
+ tmp1 = vec_packs(L_tmp1, L_tmp3);
+ tmp2 = vec_mergeh(tmp0, tmp1);
+ tmp3 = vec_mergel(tmp0, tmp1);
+
+
+ lspq[0] = vec_adds(table0, tmp2);
+ lspq[1] = vec_adds(table1, tmp3);
+
+ return;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/gcc-bug-i.c b/gcc/testsuite/gcc.dg/vmx/gcc-bug-i.c
new file mode 100644
index 0000000000..58ccb3fe76
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/gcc-bug-i.c
@@ -0,0 +1,42 @@
+#include "harness.h"
+
+/* This problem occurs when a function is inlined. When its local
+   variables are allocated space on the stack frame of the caller (the
+   function into which it is inlined), they don't get 16-byte alignment
+   even if they need it. Here's an example with a union (that's the
+   first case I uncovered, but it's probably a general occurrence with
+   inlining). */
+
+#define N 10
+/* Adjust N (the size of the buffer) to try to get bad alignment for
+   the inlined union.  */
+
+#define DO_INLINE __attribute__ ((always_inline))
+#define DONT_INLINE __attribute__ ((noinline))
+
+static DO_INLINE int inline_me(vector signed short data)
+{
+ union {vector signed short v; signed short s[8];} u;
+ u.v = data;
+ return u.s[7];
+}
+
+static DONT_INLINE int foo(vector signed short data)
+{
+ int c, buffer[N], i;
+ c = inline_me(data);
+ for (i=0; i<N; i++) {
+ if (i == 0)
+ buffer[i] = c;
+ else
+ buffer[i] = buffer[i-1] + c*i;
+ }
+ return buffer[N-1];
+}
+
+static void test()
+{
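+  /* On big-endian PowerPC, element 7 of the vector is its last two
+     bytes, 15 and 16, i.e. the signed short 0x0f10 = 3856; foo then
+     computes 3856 * (1 + 1 + 2 + ... + 9) = 3856 * 46 = 0x2b4e0.  */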
+ check(foo((vector signed short)
+ ((vector unsigned char){1,2,3,4,5,6,7,8,
+ 9,10,11,12,13,14,15,16})) == 0x2b4e0,
+ "foo");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/harness.h b/gcc/testsuite/gcc.dg/vmx/harness.h
new file mode 100644
index 0000000000..faea3e97db
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/harness.h
@@ -0,0 +1,30 @@
+/* Common code for most VMX test cases. To use, include this file,
+ then write a routine named test() that performs a series of calls
+ to check(). */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <altivec.h>
+
+static int failed;
+static void test (void);
+
+static void
+check (int result, const char *name)
+{
+ if (!result)
+ {
+ failed++;
+ printf ("fail %s\n", name);
+ }
+}
+
+int
+main (void)
+{
+ test ();
+ if (failed)
+ abort ();
+
+ return 0;
+}
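For reference, a minimal client of this harness might look like the following sketch; the vec_add check here is illustrative only and is not part of this patch.

#include "harness.h"

/* Hypothetical example, not part of the patch: harness.h supplies
   main() and check(), so a test file only defines test().  */
static void
test (void)
{
  vector signed int a = ((vector signed int){1, 2, 3, 4});
  vector signed int b = ((vector signed int){10, 20, 30, 40});

  check (vec_all_eq (vec_add (a, b),
                     ((vector signed int){11, 22, 33, 44})),
         "vec_add sketch");
}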
diff --git a/gcc/testsuite/gcc.dg/vmx/ira1.c b/gcc/testsuite/gcc.dg/vmx/ira1.c
new file mode 100644
index 0000000000..eee2869121
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/ira1.c
@@ -0,0 +1,10 @@
+#include <altivec.h>
+#include <stdlib.h>
+vector unsigned char u8a, u8b;
+
+int main()
+{
+ if (!vec_all_eq(u8a, u8b))
+ abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ira2.c b/gcc/testsuite/gcc.dg/vmx/ira2.c
new file mode 100644
index 0000000000..cf2248097f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/ira2.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+#include <altivec.h>
+vector unsigned char u8a, u8b, u8c, u8d, *u8ptr;
+vector signed short s16a, s16b, s16c, s16d;
+vector unsigned short u16a, u16b, u16c, u16d;
+vector unsigned int u32a, u32b, u32c, u32d;
+vector float f32a, f32b, f32c, f32d, f32e;
+int i, j, *p;
+
+void test()
+{
+ u8c = vec_add(u8a, u8b);
+ f32c = vec_ceil(f32a);
+ f32d = vec_vcfux(u32a, 31U);
+ s16c = vec_splat_s16(-16);
+ u8d = vec_vsldoi(u8a, u8b, 15);
+ f32e = vec_vmaddfp(f32a, f32b, f32c);
+ /* vec_dstst(u8ptr, i, 3U); */
+ vec_dss(3);
+ vec_dssall();
+ vec_mtvscr(u8a);
+ u16a = vec_mfvscr();
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ira2a.c b/gcc/testsuite/gcc.dg/vmx/ira2a.c
new file mode 100644
index 0000000000..e6a716d6fe
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/ira2a.c
@@ -0,0 +1,9 @@
+#include <altivec.h>
+vector unsigned char *u8ptr;
+int i;
+
+int main()
+{
+ vec_dstst(u8ptr, i, 3U);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ira2b.c b/gcc/testsuite/gcc.dg/vmx/ira2b.c
new file mode 100644
index 0000000000..d9a33f2dd0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/ira2b.c
@@ -0,0 +1,17 @@
+#include "harness.h"
+
+static vector float
+f(vector float f32a, vector float f32b, vector float f32c)
+{
+ f32c = vec_ceil(f32a);
+ return vec_vmaddfp(f32a, f32b, f32c);
+}
+
+static void test()
+{
+ check(vec_all_eq(f(((vector float){2,3,5,7}),
+ ((vector float){11,13,17,19}),
+ ((vector float){23,29,31,37})),
+ ((vector float){24, 42, 90, 140})),
+ "test");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ira2c.c b/gcc/testsuite/gcc.dg/vmx/ira2c.c
new file mode 100644
index 0000000000..bce5469cea
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/ira2c.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+
+double __fabs(double a) { return a; }
+double __fmadd(double a, double b, double c) { return a*b+c; }
+
+double
+test(double f32a, double f32b, double f32c)
+{
+ f32c = __fabs(f32a);
+ return __fmadd(f32a, f32b, f32c);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/mem.c b/gcc/testsuite/gcc.dg/vmx/mem.c
new file mode 100644
index 0000000000..a26eb3cfc6
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/mem.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+#include <altivec.h>
+void
+f(vector unsigned char *a, vector unsigned char *b, vector unsigned char *c)
+{
+ int i;
+ for (i = 0; i < 16; i++)
+ c[i] = vec_add(a[i], b[i]);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/newton-1.c b/gcc/testsuite/gcc.dg/vmx/newton-1.c
new file mode 100644
index 0000000000..c5963c0344
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/newton-1.c
@@ -0,0 +1,67 @@
+/* { dg-do compile } */
+#include <altivec.h>
+
+#define SPLAT76 ((vector unsigned char)\
+ {0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3})
+#define SPLAT54 ((vector unsigned char)\
+ {4,5,6,7,4,5,6,7,4,5,6,7,4,5,6,7})
+#define SPLAT32 ((vector unsigned char)\
+ {8,9,10,11,8,9,10,11,8,9,10,11,8,9,10,11})
+#define SPLAT10 ((vector unsigned char)\
+ {12,13,14,15,12,13,14,15,12,13,14,15,12,13,14,15})
+#define INTERLEAVE ((vector unsigned char)\
+ {0,1,16,17,4,5,20,21,8,9,24,25,12,13,28,29})
+
+long real_32_manytaps (long ntaps, vector signed short *c, long ndat,
+ vector signed short *x, vector signed short *y)
+{
+ long i, j, op, ndatavec, ncoefvec;
+ vector signed short x0, x1;
+ vector signed short coef;
+ vector signed short cr10, cr32, cr54, cr76;
+ vector signed int y_even, y_odd;
+ vector signed short *x1p;
+
+ op = 0;
+ ndatavec = ndat >> 3;
+ ncoefvec = ntaps >> 3;
+
+ for (i = 0; i < ndatavec; i += 1) {
+ x0 = x[i];
+
+ y_even = ((vector signed int){0x8000,0x8000,0x8000,0x8000});
+ y_odd = ((vector signed int){0x8000,0x8000,0x8000,0x8000});
+
+ j = 0;
+ x1p = x + 1 + i;
+
+ do {
+
+ coef = c[j];
+ x1 = x1p[j];
+
+ cr10 = vec_perm(coef, coef, SPLAT10);
+ y_odd = vec_msums(cr10, x1, y_odd);
+ y_even = vec_msums(cr10, vec_sld(x0, x1, 14), y_even);
+
+ cr32 = vec_perm(coef, coef, SPLAT32);
+ y_odd = vec_msums(cr32, vec_sld(x0, x1, 12), y_odd);
+ y_even = vec_msums(cr32, vec_sld(x0, x1, 10), y_even);
+
+ cr54 = vec_perm(coef, coef, SPLAT54);
+ y_odd = vec_msums(cr54, vec_sld(x0, x1, 8), y_odd);
+ y_even = vec_msums(cr54, vec_sld(x0, x1, 6), y_even);
+
+ cr76 = vec_perm(coef, coef, SPLAT76);
+ y_odd = vec_msums(cr76, vec_sld(x0, x1, 4), y_odd);
+ y_even = vec_msums(cr76, vec_sld(x0, x1, 2), y_even);
+
+ x0 = x1;
+
+ } while (++j < ncoefvec);
+ y[op++] = (vector signed short) vec_perm(y_even, y_odd, INTERLEAVE);
+
+ }
+
+ return op*8;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ops-long-1.c b/gcc/testsuite/gcc.dg/vmx/ops-long-1.c
new file mode 100644
index 0000000000..5471706790
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/ops-long-1.c
@@ -0,0 +1,80 @@
+/* { dg-do compile } */
+
+/* Checks from the original ops.c that pass pointers to long or
+ unsigned long for operations that support that in released versions
+ of <altivec.h>. */
+
+#include <altivec.h>
+#include <stdlib.h>
+extern int *var_int;
+extern long * *var_long_ptr;
+extern unsigned long * *var_unsigned_long_ptr;
+extern vector signed int * *var_vec_s32_ptr;
+extern vector signed int *var_vec_s32;
+extern vector unsigned char * *var_vec_u8_ptr;
+extern vector unsigned char *var_vec_u8;
+extern vector unsigned int * *var_vec_u32_ptr;
+extern vector unsigned int *var_vec_u32;
+
+void f13() {
+ *var_vec_s32++ = vec_ld(var_int[0], var_long_ptr[1]);
+ *var_vec_s32++ = vec_lde(var_int[0], var_long_ptr[1]);
+ *var_vec_s32++ = vec_ldl(var_int[0], var_long_ptr[1]);
+ *var_vec_s32++ = vec_lvewx(var_int[0], var_long_ptr[1]);
+ *var_vec_s32++ = vec_lvx(var_int[0], var_long_ptr[1]);
+ *var_vec_s32++ = vec_lvxl(var_int[0], var_long_ptr[1]);
+}
+void f22() {
+ *var_vec_u32++ = vec_ld(var_int[0], var_unsigned_long_ptr[1]);
+ *var_vec_u32++ = vec_lde(var_int[0], var_unsigned_long_ptr[1]);
+ *var_vec_u32++ = vec_ldl(var_int[0], var_unsigned_long_ptr[1]);
+ *var_vec_u32++ = vec_lvewx(var_int[0], var_unsigned_long_ptr[1]);
+ *var_vec_u32++ = vec_lvx(var_int[0], var_unsigned_long_ptr[1]);
+ *var_vec_u32++ = vec_lvxl(var_int[0], var_unsigned_long_ptr[1]);
+}
+void f25() {
+ *var_vec_u8++ = vec_lvsl(var_int[0], var_long_ptr[1]);
+ *var_vec_u8++ = vec_lvsl(var_int[0], var_unsigned_long_ptr[1]);
+ *var_vec_u8++ = vec_lvsr(var_int[0], var_long_ptr[1]);
+ *var_vec_u8++ = vec_lvsr(var_int[0], var_unsigned_long_ptr[1]);
+}
+void f33() {
+ vec_dst(var_long_ptr[0], var_int[1], 0);
+ vec_dst(var_long_ptr[0], var_int[1], 1);
+ vec_dst(var_long_ptr[0], var_int[1], 2);
+ vec_dst(var_long_ptr[0], var_int[1], 3);
+ vec_dst(var_unsigned_long_ptr[0], var_int[1], 0);
+ vec_dst(var_unsigned_long_ptr[0], var_int[1], 1);
+ vec_dst(var_unsigned_long_ptr[0], var_int[1], 2);
+ vec_dst(var_unsigned_long_ptr[0], var_int[1], 3);
+}
+void f34() {
+ vec_dstst(var_long_ptr[0], var_int[1], 0);
+ vec_dstst(var_long_ptr[0], var_int[1], 1);
+ vec_dstst(var_long_ptr[0], var_int[1], 2);
+ vec_dstst(var_long_ptr[0], var_int[1], 3);
+ vec_dstst(var_unsigned_long_ptr[0], var_int[1], 0);
+ vec_dstst(var_unsigned_long_ptr[0], var_int[1], 1);
+ vec_dstst(var_unsigned_long_ptr[0], var_int[1], 2);
+ vec_dstst(var_unsigned_long_ptr[0], var_int[1], 3);
+}
+void f35() {
+ vec_dststt(var_long_ptr[0], var_int[1], 0);
+ vec_dststt(var_long_ptr[0], var_int[1], 1);
+ vec_dststt(var_long_ptr[0], var_int[1], 2);
+ vec_dststt(var_long_ptr[0], var_int[1], 3);
+ vec_dststt(var_unsigned_long_ptr[0], var_int[1], 0);
+ vec_dststt(var_unsigned_long_ptr[0], var_int[1], 1);
+ vec_dststt(var_unsigned_long_ptr[0], var_int[1], 2);
+ vec_dststt(var_unsigned_long_ptr[0], var_int[1], 3);
+ vec_dstt(var_long_ptr[0], var_int[1], 0);
+ vec_dstt(var_long_ptr[0], var_int[1], 1);
+ vec_dstt(var_long_ptr[0], var_int[1], 2);
+ vec_dstt(var_long_ptr[0], var_int[1], 3);
+}
+void f36() {
+ vec_dstt(var_unsigned_long_ptr[0], var_int[1], 0);
+ vec_dstt(var_unsigned_long_ptr[0], var_int[1], 1);
+ vec_dstt(var_unsigned_long_ptr[0], var_int[1], 2);
+ vec_dstt(var_unsigned_long_ptr[0], var_int[1], 3);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ops-long-2.c b/gcc/testsuite/gcc.dg/vmx/ops-long-2.c
new file mode 100644
index 0000000000..9cb6721065
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/ops-long-2.c
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+
+/* Checks from the original ops.c that pass pointers to long or
+   unsigned long to operations that do not support that in released
+   versions of <altivec.h>. */
+
+#include <altivec.h>
+#include <stdlib.h>
+extern int *var_int;
+extern long * *var_long_ptr;
+extern unsigned long * *var_unsigned_long_ptr;
+extern vector signed int * *var_vec_s32_ptr;
+extern vector signed int *var_vec_s32;
+extern vector unsigned char * *var_vec_u8_ptr;
+extern vector unsigned char *var_vec_u8;
+extern vector unsigned int * *var_vec_u32_ptr;
+extern vector unsigned int *var_vec_u32;
+
+void f36() {
+ vec_st(var_vec_s32[0], var_int[1], var_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+ vec_st(var_vec_u32[0], var_int[1], var_unsigned_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+}
+void f37() {
+ vec_ste(var_vec_s32[0], var_int[1], var_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+ vec_ste(var_vec_u32[0], var_int[1], var_unsigned_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+ vec_stl(var_vec_s32[0], var_int[1], var_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+ vec_stl(var_vec_u32[0], var_int[1], var_unsigned_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+ vec_stvewx(var_vec_s32[0], var_int[1], var_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+ vec_stvewx(var_vec_u32[0], var_int[1], var_unsigned_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+ vec_stvx(var_vec_s32[0], var_int[1], var_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+ vec_stvx(var_vec_u32[0], var_int[1], var_unsigned_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+ vec_stvxl(var_vec_s32[0], var_int[1], var_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+ vec_stvxl(var_vec_u32[0], var_int[1], var_unsigned_long_ptr[2]); /* { dg-error "invalid parameter combination" } */
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/ops.c b/gcc/testsuite/gcc.dg/vmx/ops.c
new file mode 100644
index 0000000000..b39ad1d656
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/ops.c
@@ -0,0 +1,3831 @@
+/* { dg-do compile } */
+#include <altivec.h>
+#include <stdlib.h>
+extern char * *var_char_ptr;
+extern float * *var_float_ptr;
+extern int * *var_int_ptr;
+extern int *var_cc24f;
+extern int *var_cc24fd;
+extern int *var_cc24fr;
+extern int *var_cc24t;
+extern int *var_cc24td;
+extern int *var_cc24tr;
+extern int *var_cc26f;
+extern int *var_cc26fd;
+extern int *var_cc26fr;
+extern int *var_cc26t;
+extern int *var_cc26td;
+extern int *var_cc26tr;
+extern int *var_int;
+extern short * *var_short_ptr;
+extern signed char * *var_signed_char_ptr;
+extern unsigned char * *var_unsigned_char_ptr;
+extern unsigned int * *var_unsigned_int_ptr;
+extern unsigned short * *var_unsigned_short_ptr;
+extern vector bool char * *var_vec_b8_ptr;
+extern vector bool char *var_vec_b8;
+extern vector bool int * *var_vec_b32_ptr;
+extern vector bool int *var_vec_b32;
+extern vector bool short * *var_vec_b16_ptr;
+extern vector bool short *var_vec_b16;
+extern vector float * *var_vec_f32_ptr;
+extern vector float *var_vec_f32;
+extern vector pixel * *var_vec_p16_ptr;
+extern vector pixel *var_vec_p16;
+extern vector signed char * *var_vec_s8_ptr;
+extern vector signed char *var_vec_s8;
+extern vector signed int * *var_vec_s32_ptr;
+extern vector signed int *var_vec_s32;
+extern vector signed short * *var_vec_s16_ptr;
+extern vector signed short *var_vec_s16;
+extern vector unsigned char * *var_vec_u8_ptr;
+extern vector unsigned char *var_vec_u8;
+extern vector unsigned int * *var_vec_u32_ptr;
+extern vector unsigned int *var_vec_u32;
+extern vector unsigned short * *var_vec_u16_ptr;
+extern vector unsigned short *var_vec_u16;
+extern vector unsigned short *var_volatile_vec_u16;
+void f0() {
+ *var_cc24f++ = vec_any_le(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc24f++ = vec_any_le(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc24f++ = vec_any_le(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc24f++ = vec_any_le(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc24f++ = vec_any_le(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc24f++ = vec_any_le(var_vec_b8[0], var_vec_u8[1]);
+ *var_cc24f++ = vec_any_le(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc24f++ = vec_any_le(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc24f++ = vec_any_le(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc24f++ = vec_any_le(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc24f++ = vec_any_le(var_vec_s8[0], var_vec_b8[1]);
+ *var_cc24f++ = vec_any_le(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc24f++ = vec_any_le(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc24f++ = vec_any_le(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc24f++ = vec_any_le(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc24f++ = vec_any_le(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc24f++ = vec_any_le(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc24f++ = vec_any_le(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_b16[0], var_vec_b16[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_b32[0], var_vec_b32[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_b8[0], var_vec_b8[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_b8[0], var_vec_u8[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_p16[0], var_vec_p16[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_s8[0], var_vec_b8[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc24f++ = vec_any_ne(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc24f++ = vec_any_nge(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc24f++ = vec_any_ngt(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc24fd++ = vec_any_nan(var_vec_f32[0]);
+ *var_cc24fr++ = vec_any_ge(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_b8[0], var_vec_u8[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_s8[0], var_vec_b8[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc24fr++ = vec_any_ge(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc24fr++ = vec_any_nle(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc24fr++ = vec_any_nlt(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_b16[0], var_vec_b16[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_b32[0], var_vec_b32[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_b8[0], var_vec_b8[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_b8[0], var_vec_u8[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_p16[0], var_vec_p16[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_s8[0], var_vec_b8[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc24t++ = vec_all_eq(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc24t++ = vec_all_ge(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_b8[0], var_vec_u8[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_s8[0], var_vec_b8[1]);
+}
+void f1() {
+ *var_cc24t++ = vec_all_gt(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc24t++ = vec_all_gt(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc24td++ = vec_all_numeric(var_vec_f32[0]);
+ *var_cc24tr++ = vec_all_le(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_b8[0], var_vec_u8[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_s8[0], var_vec_b8[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc24tr++ = vec_all_lt(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_b16[0], var_vec_b16[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_b32[0], var_vec_b32[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_b8[0], var_vec_b8[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_b8[0], var_vec_u8[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_p16[0], var_vec_p16[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_s8[0], var_vec_b8[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc26f++ = vec_any_eq(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc26f++ = vec_any_ge(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_b8[0], var_vec_u8[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_s8[0], var_vec_b8[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc26f++ = vec_any_gt(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc26f++ = vec_any_out(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc26fd++ = vec_any_numeric(var_vec_f32[0]);
+ *var_cc26fr++ = vec_any_le(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_b8[0], var_vec_u8[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_s8[0], var_vec_b8[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc26fr++ = vec_any_lt(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc26t++ = vec_all_in(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc26t++ = vec_all_le(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc26t++ = vec_all_le(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc26t++ = vec_all_le(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc26t++ = vec_all_le(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc26t++ = vec_all_le(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc26t++ = vec_all_le(var_vec_b8[0], var_vec_u8[1]);
+}
+void f2() {
+ *var_cc26t++ = vec_all_le(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc26t++ = vec_all_le(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc26t++ = vec_all_le(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc26t++ = vec_all_le(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc26t++ = vec_all_le(var_vec_s8[0], var_vec_b8[1]);
+ *var_cc26t++ = vec_all_le(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc26t++ = vec_all_le(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc26t++ = vec_all_le(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc26t++ = vec_all_le(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc26t++ = vec_all_le(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc26t++ = vec_all_le(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc26t++ = vec_all_le(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_b16[0], var_vec_b16[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_b32[0], var_vec_b32[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_b8[0], var_vec_b8[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_b8[0], var_vec_u8[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_p16[0], var_vec_p16[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_s8[0], var_vec_b8[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc26t++ = vec_all_ne(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc26t++ = vec_all_nge(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc26t++ = vec_all_ngt(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc26td++ = vec_all_nan(var_vec_f32[0]);
+ *var_cc26tr++ = vec_all_ge(var_vec_b16[0], var_vec_s16[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_b16[0], var_vec_u16[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_b32[0], var_vec_s32[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_b32[0], var_vec_u32[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_b8[0], var_vec_s8[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_b8[0], var_vec_u8[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_s16[0], var_vec_b16[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_s16[0], var_vec_s16[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_s32[0], var_vec_b32[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_s32[0], var_vec_s32[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_s8[0], var_vec_b8[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_s8[0], var_vec_s8[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_u16[0], var_vec_b16[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_u16[0], var_vec_u16[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_u32[0], var_vec_b32[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_u32[0], var_vec_u32[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_u8[0], var_vec_b8[1]);
+ *var_cc26tr++ = vec_all_ge(var_vec_u8[0], var_vec_u8[1]);
+ *var_cc26tr++ = vec_all_nle(var_vec_f32[0], var_vec_f32[1]);
+ *var_cc26tr++ = vec_all_nlt(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_b16++ = vec_and(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_andc(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_cmpeq(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_b16++ = vec_cmpeq(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_b16++ = vec_cmpgt(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_b16++ = vec_cmpgt(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_b16++ = vec_ld(var_int[0], var_vec_b16_ptr[1]);
+ *var_vec_b16++ = vec_ldl(var_int[0], var_vec_b16_ptr[1]);
+ *var_vec_b16++ = vec_lvx(var_int[0], var_vec_b16_ptr[1]);
+ *var_vec_b16++ = vec_lvxl(var_int[0], var_vec_b16_ptr[1]);
+ *var_vec_b16++ = vec_mergeh(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_mergel(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_nor(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_or(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_pack(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b16++ = vec_perm(var_vec_b16[0], var_vec_b16[1], var_vec_u8[2]);
+ *var_vec_b16++ = vec_sel(var_vec_b16[0], var_vec_b16[1], var_vec_b16[2]);
+ *var_vec_b16++ = vec_sel(var_vec_b16[0], var_vec_b16[1], var_vec_u16[2]);
+ *var_vec_b16++ = vec_sll(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_b16++ = vec_sll(var_vec_b16[0], var_vec_u32[1]);
+ *var_vec_b16++ = vec_sll(var_vec_b16[0], var_vec_u8[1]);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 0);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 1);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 2);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 3);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 4);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 5);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 6);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 7);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 8);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 9);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 10);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 11);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 12);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 13);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 14);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 15);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 16);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 17);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 18);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 19);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 20);
+}
+void f3() {
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 21);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 22);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 23);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 24);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 25);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 26);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 27);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 28);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 29);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 30);
+ *var_vec_b16++ = vec_splat(var_vec_b16[0], 31);
+ *var_vec_b16++ = vec_srl(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_b16++ = vec_srl(var_vec_b16[0], var_vec_u32[1]);
+ *var_vec_b16++ = vec_srl(var_vec_b16[0], var_vec_u8[1]);
+ *var_vec_b16++ = vec_unpackh(var_vec_b8[0]);
+ *var_vec_b16++ = vec_unpackl(var_vec_b8[0]);
+ *var_vec_b16++ = vec_vand(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_vandc(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_vcmpequh(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_b16++ = vec_vcmpequh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_b16++ = vec_vcmpgtsh(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_b16++ = vec_vcmpgtuh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_b16++ = vec_vmrghh(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_vmrglh(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_vnor(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_vor(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_vperm(var_vec_b16[0], var_vec_b16[1], var_vec_u8[2]);
+ *var_vec_b16++ = vec_vpkuwum(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b16++ = vec_vsel(var_vec_b16[0], var_vec_b16[1], var_vec_b16[2]);
+ *var_vec_b16++ = vec_vsel(var_vec_b16[0], var_vec_b16[1], var_vec_u16[2]);
+ *var_vec_b16++ = vec_vsl(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_b16++ = vec_vsl(var_vec_b16[0], var_vec_u32[1]);
+ *var_vec_b16++ = vec_vsl(var_vec_b16[0], var_vec_u8[1]);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 0);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 1);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 2);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 3);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 4);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 5);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 6);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 7);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 8);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 9);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 10);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 11);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 12);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 13);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 14);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 15);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 16);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 17);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 18);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 19);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 20);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 21);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 22);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 23);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 24);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 25);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 26);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 27);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 28);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 29);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 30);
+ *var_vec_b16++ = vec_vsplth(var_vec_b16[0], 31);
+ *var_vec_b16++ = vec_vsr(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_b16++ = vec_vsr(var_vec_b16[0], var_vec_u32[1]);
+ *var_vec_b16++ = vec_vsr(var_vec_b16[0], var_vec_u8[1]);
+ *var_vec_b16++ = vec_vupkhsb(var_vec_b8[0]);
+ *var_vec_b16++ = vec_vupklsb(var_vec_b8[0]);
+ *var_vec_b16++ = vec_vxor(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b16++ = vec_xor(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b32++ = vec_and(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_andc(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_cmpeq(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_b32++ = vec_cmpeq(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_b32++ = vec_cmpeq(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_b32++ = vec_cmpge(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_b32++ = vec_cmpgt(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_b32++ = vec_cmpgt(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_b32++ = vec_cmpgt(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_b32++ = vec_ld(var_int[0], var_vec_b32_ptr[1]);
+ *var_vec_b32++ = vec_ldl(var_int[0], var_vec_b32_ptr[1]);
+ *var_vec_b32++ = vec_lvx(var_int[0], var_vec_b32_ptr[1]);
+ *var_vec_b32++ = vec_lvxl(var_int[0], var_vec_b32_ptr[1]);
+ *var_vec_b32++ = vec_mergeh(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_mergel(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_nor(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_or(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_perm(var_vec_b32[0], var_vec_b32[1], var_vec_u8[2]);
+ *var_vec_b32++ = vec_sel(var_vec_b32[0], var_vec_b32[1], var_vec_b32[2]);
+ *var_vec_b32++ = vec_sel(var_vec_b32[0], var_vec_b32[1], var_vec_u32[2]);
+ *var_vec_b32++ = vec_sll(var_vec_b32[0], var_vec_u16[1]);
+ *var_vec_b32++ = vec_sll(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_b32++ = vec_sll(var_vec_b32[0], var_vec_u8[1]);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 0);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 1);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 2);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 3);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 4);
+}
+void f4() {
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 5);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 6);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 7);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 8);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 9);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 10);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 11);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 12);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 13);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 14);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 15);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 16);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 17);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 18);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 19);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 20);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 21);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 22);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 23);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 24);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 25);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 26);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 27);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 28);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 29);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 30);
+ *var_vec_b32++ = vec_splat(var_vec_b32[0], 31);
+ *var_vec_b32++ = vec_srl(var_vec_b32[0], var_vec_u16[1]);
+ *var_vec_b32++ = vec_srl(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_b32++ = vec_srl(var_vec_b32[0], var_vec_u8[1]);
+ *var_vec_b32++ = vec_unpackh(var_vec_b16[0]);
+ *var_vec_b32++ = vec_unpackl(var_vec_b16[0]);
+ *var_vec_b32++ = vec_vand(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_vandc(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_vcmpeqfp(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_b32++ = vec_vcmpequw(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_b32++ = vec_vcmpequw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_b32++ = vec_vcmpgefp(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_b32++ = vec_vcmpgtfp(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_b32++ = vec_vcmpgtsw(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_b32++ = vec_vcmpgtuw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_b32++ = vec_vmrghw(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_vmrglw(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_vnor(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_vor(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_vperm(var_vec_b32[0], var_vec_b32[1], var_vec_u8[2]);
+ *var_vec_b32++ = vec_vsel(var_vec_b32[0], var_vec_b32[1], var_vec_b32[2]);
+ *var_vec_b32++ = vec_vsel(var_vec_b32[0], var_vec_b32[1], var_vec_u32[2]);
+ *var_vec_b32++ = vec_vsl(var_vec_b32[0], var_vec_u16[1]);
+ *var_vec_b32++ = vec_vsl(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_b32++ = vec_vsl(var_vec_b32[0], var_vec_u8[1]);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 0);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 1);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 2);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 3);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 4);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 5);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 6);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 7);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 8);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 9);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 10);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 11);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 12);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 13);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 14);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 15);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 16);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 17);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 18);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 19);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 20);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 21);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 22);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 23);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 24);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 25);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 26);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 27);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 28);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 29);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 30);
+ *var_vec_b32++ = vec_vspltw(var_vec_b32[0], 31);
+ *var_vec_b32++ = vec_vsr(var_vec_b32[0], var_vec_u16[1]);
+ *var_vec_b32++ = vec_vsr(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_b32++ = vec_vsr(var_vec_b32[0], var_vec_u8[1]);
+ *var_vec_b32++ = vec_vupkhsh(var_vec_b16[0]);
+ *var_vec_b32++ = vec_vupklsh(var_vec_b16[0]);
+ *var_vec_b32++ = vec_vxor(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b32++ = vec_xor(var_vec_b32[0], var_vec_b32[1]);
+ *var_vec_b8++ = vec_and(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_andc(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_cmpeq(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_b8++ = vec_cmpeq(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_b8++ = vec_cmpgt(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_b8++ = vec_cmpgt(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_b8++ = vec_ld(var_int[0], var_vec_b8_ptr[1]);
+ *var_vec_b8++ = vec_ldl(var_int[0], var_vec_b8_ptr[1]);
+ *var_vec_b8++ = vec_lvx(var_int[0], var_vec_b8_ptr[1]);
+ *var_vec_b8++ = vec_lvxl(var_int[0], var_vec_b8_ptr[1]);
+}
+void f5() {
+ *var_vec_b8++ = vec_mergeh(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_mergel(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_nor(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_or(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_pack(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b8++ = vec_perm(var_vec_b8[0], var_vec_b8[1], var_vec_u8[2]);
+ *var_vec_b8++ = vec_sel(var_vec_b8[0], var_vec_b8[1], var_vec_b8[2]);
+ *var_vec_b8++ = vec_sel(var_vec_b8[0], var_vec_b8[1], var_vec_u8[2]);
+ *var_vec_b8++ = vec_sll(var_vec_b8[0], var_vec_u16[1]);
+ *var_vec_b8++ = vec_sll(var_vec_b8[0], var_vec_u32[1]);
+ *var_vec_b8++ = vec_sll(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 0);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 1);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 2);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 3);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 4);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 5);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 6);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 7);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 8);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 9);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 10);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 11);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 12);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 13);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 14);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 15);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 16);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 17);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 18);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 19);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 20);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 21);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 22);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 23);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 24);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 25);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 26);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 27);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 28);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 29);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 30);
+ *var_vec_b8++ = vec_splat(var_vec_b8[0], 31);
+ *var_vec_b8++ = vec_srl(var_vec_b8[0], var_vec_u16[1]);
+ *var_vec_b8++ = vec_srl(var_vec_b8[0], var_vec_u32[1]);
+ *var_vec_b8++ = vec_srl(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_b8++ = vec_vand(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_vandc(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_vcmpequb(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_b8++ = vec_vcmpequb(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_b8++ = vec_vcmpgtsb(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_b8++ = vec_vcmpgtub(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_b8++ = vec_vmrghb(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_vmrglb(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_vnor(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_vor(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_vperm(var_vec_b8[0], var_vec_b8[1], var_vec_u8[2]);
+ *var_vec_b8++ = vec_vpkuhum(var_vec_b16[0], var_vec_b16[1]);
+ *var_vec_b8++ = vec_vsel(var_vec_b8[0], var_vec_b8[1], var_vec_b8[2]);
+ *var_vec_b8++ = vec_vsel(var_vec_b8[0], var_vec_b8[1], var_vec_u8[2]);
+ *var_vec_b8++ = vec_vsl(var_vec_b8[0], var_vec_u16[1]);
+ *var_vec_b8++ = vec_vsl(var_vec_b8[0], var_vec_u32[1]);
+ *var_vec_b8++ = vec_vsl(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 0);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 1);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 2);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 3);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 4);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 5);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 6);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 7);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 8);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 9);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 10);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 11);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 12);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 13);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 14);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 15);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 16);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 17);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 18);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 19);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 20);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 21);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 22);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 23);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 24);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 25);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 26);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 27);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 28);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 29);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 30);
+ *var_vec_b8++ = vec_vspltb(var_vec_b8[0], 31);
+ *var_vec_b8++ = vec_vsr(var_vec_b8[0], var_vec_u16[1]);
+ *var_vec_b8++ = vec_vsr(var_vec_b8[0], var_vec_u32[1]);
+ *var_vec_b8++ = vec_vsr(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_b8++ = vec_vxor(var_vec_b8[0], var_vec_b8[1]);
+ *var_vec_b8++ = vec_xor(var_vec_b8[0], var_vec_b8[1]);
+}
+void f6() {
+ *var_vec_f32++ = vec_add(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_and(var_vec_b32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_and(var_vec_f32[0], var_vec_b32[1]);
+ *var_vec_f32++ = vec_and(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_andc(var_vec_b32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_andc(var_vec_f32[0], var_vec_b32[1]);
+ *var_vec_f32++ = vec_andc(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_ceil(var_vec_f32[0]);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 0);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 1);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 2);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 3);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 4);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 5);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 6);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 7);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 8);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 9);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 10);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 11);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 12);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 13);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 14);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 15);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 16);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 17);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 18);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 19);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 20);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 21);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 22);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 23);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 24);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 25);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 26);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 27);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 28);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 29);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 30);
+ *var_vec_f32++ = vec_ctf(var_vec_s32[0], 31);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 0);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 1);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 2);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 3);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 4);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 5);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 6);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 7);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 8);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 9);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 10);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 11);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 12);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 13);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 14);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 15);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 16);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 17);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 18);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 19);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 20);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 21);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 22);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 23);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 24);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 25);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 26);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 27);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 28);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 29);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 30);
+ *var_vec_f32++ = vec_ctf(var_vec_u32[0], 31);
+ *var_vec_f32++ = vec_expte(var_vec_f32[0]);
+ *var_vec_f32++ = vec_floor(var_vec_f32[0]);
+ *var_vec_f32++ = vec_ld(var_int[0], var_float_ptr[1]);
+ *var_vec_f32++ = vec_ld(var_int[0], var_vec_f32_ptr[1]);
+ *var_vec_f32++ = vec_lde(var_int[0], var_float_ptr[1]);
+ *var_vec_f32++ = vec_ldl(var_int[0], var_float_ptr[1]);
+ *var_vec_f32++ = vec_ldl(var_int[0], var_vec_f32_ptr[1]);
+ *var_vec_f32++ = vec_loge(var_vec_f32[0]);
+ *var_vec_f32++ = vec_lvewx(var_int[0], var_float_ptr[1]);
+ *var_vec_f32++ = vec_lvx(var_int[0], var_float_ptr[1]);
+ *var_vec_f32++ = vec_lvx(var_int[0], var_vec_f32_ptr[1]);
+ *var_vec_f32++ = vec_lvxl(var_int[0], var_float_ptr[1]);
+ *var_vec_f32++ = vec_lvxl(var_int[0], var_vec_f32_ptr[1]);
+ *var_vec_f32++ = vec_madd(var_vec_f32[0], var_vec_f32[1], var_vec_f32[2]);
+ *var_vec_f32++ = vec_max(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_mergeh(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_mergel(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_min(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_nmsub(var_vec_f32[0], var_vec_f32[1], var_vec_f32[2]);
+ *var_vec_f32++ = vec_nor(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_or(var_vec_b32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_or(var_vec_f32[0], var_vec_b32[1]);
+ *var_vec_f32++ = vec_or(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_perm(var_vec_f32[0], var_vec_f32[1], var_vec_u8[2]);
+ *var_vec_f32++ = vec_re(var_vec_f32[0]);
+ *var_vec_f32++ = vec_round(var_vec_f32[0]);
+ *var_vec_f32++ = vec_rsqrte(var_vec_f32[0]);
+ *var_vec_f32++ = vec_sel(var_vec_f32[0], var_vec_f32[1], var_vec_b32[2]);
+}
+void f7() {
+ *var_vec_f32++ = vec_sel(var_vec_f32[0], var_vec_f32[1], var_vec_u32[2]);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 0);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 1);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 2);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 3);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 4);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 5);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 6);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 7);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 8);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 9);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 10);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 11);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 12);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 13);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 14);
+ *var_vec_f32++ = vec_sld(var_vec_f32[0], var_vec_f32[1], 15);
+ *var_vec_f32++ = vec_slo(var_vec_f32[0], var_vec_s8[1]);
+ *var_vec_f32++ = vec_slo(var_vec_f32[0], var_vec_u8[1]);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 0);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 1);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 2);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 3);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 4);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 5);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 6);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 7);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 8);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 9);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 10);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 11);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 12);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 13);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 14);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 15);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 16);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 17);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 18);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 19);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 20);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 21);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 22);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 23);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 24);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 25);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 26);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 27);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 28);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 29);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 30);
+ *var_vec_f32++ = vec_splat(var_vec_f32[0], 31);
+ *var_vec_f32++ = vec_sro(var_vec_f32[0], var_vec_s8[1]);
+ *var_vec_f32++ = vec_sro(var_vec_f32[0], var_vec_u8[1]);
+ *var_vec_f32++ = vec_sub(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_trunc(var_vec_f32[0]);
+ *var_vec_f32++ = vec_vaddfp(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vand(var_vec_b32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vand(var_vec_f32[0], var_vec_b32[1]);
+ *var_vec_f32++ = vec_vand(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vandc(var_vec_b32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vandc(var_vec_f32[0], var_vec_b32[1]);
+ *var_vec_f32++ = vec_vandc(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 0);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 1);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 2);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 3);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 4);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 5);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 6);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 7);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 8);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 9);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 10);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 11);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 12);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 13);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 14);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 15);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 16);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 17);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 18);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 19);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 20);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 21);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 22);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 23);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 24);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 25);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 26);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 27);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 28);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 29);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 30);
+ *var_vec_f32++ = vec_vcfsx(var_vec_s32[0], 31);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 0);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 1);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 2);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 3);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 4);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 5);
+}
+void f8() {
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 6);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 7);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 8);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 9);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 10);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 11);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 12);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 13);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 14);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 15);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 16);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 17);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 18);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 19);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 20);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 21);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 22);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 23);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 24);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 25);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 26);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 27);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 28);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 29);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 30);
+ *var_vec_f32++ = vec_vcfux(var_vec_u32[0], 31);
+ *var_vec_f32++ = vec_vexptefp(var_vec_f32[0]);
+ *var_vec_f32++ = vec_vlogefp(var_vec_f32[0]);
+ *var_vec_f32++ = vec_vmaddfp(var_vec_f32[0], var_vec_f32[1], var_vec_f32[2]);
+ *var_vec_f32++ = vec_vmaxfp(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vminfp(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vmrghw(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vmrglw(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vnmsubfp(var_vec_f32[0], var_vec_f32[1], var_vec_f32[2]);
+ *var_vec_f32++ = vec_vnor(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vor(var_vec_b32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vor(var_vec_f32[0], var_vec_b32[1]);
+ *var_vec_f32++ = vec_vor(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vperm(var_vec_f32[0], var_vec_f32[1], var_vec_u8[2]);
+ *var_vec_f32++ = vec_vrefp(var_vec_f32[0]);
+ *var_vec_f32++ = vec_vrfim(var_vec_f32[0]);
+ *var_vec_f32++ = vec_vrfin(var_vec_f32[0]);
+ *var_vec_f32++ = vec_vrfip(var_vec_f32[0]);
+ *var_vec_f32++ = vec_vrfiz(var_vec_f32[0]);
+ *var_vec_f32++ = vec_vrsqrtefp(var_vec_f32[0]);
+ *var_vec_f32++ = vec_vsel(var_vec_f32[0], var_vec_f32[1], var_vec_b32[2]);
+ *var_vec_f32++ = vec_vsel(var_vec_f32[0], var_vec_f32[1], var_vec_u32[2]);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 0);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 1);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 2);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 3);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 4);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 5);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 6);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 7);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 8);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 9);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 10);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 11);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 12);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 13);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 14);
+ *var_vec_f32++ = vec_vsldoi(var_vec_f32[0], var_vec_f32[1], 15);
+ *var_vec_f32++ = vec_vslo(var_vec_f32[0], var_vec_s8[1]);
+ *var_vec_f32++ = vec_vslo(var_vec_f32[0], var_vec_u8[1]);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 0);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 1);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 2);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 3);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 4);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 5);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 6);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 7);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 8);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 9);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 10);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 11);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 12);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 13);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 14);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 15);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 16);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 17);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 18);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 19);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 20);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 21);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 22);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 23);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 24);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 25);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 26);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 27);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 28);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 29);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 30);
+ *var_vec_f32++ = vec_vspltw(var_vec_f32[0], 31);
+ *var_vec_f32++ = vec_vsro(var_vec_f32[0], var_vec_s8[1]);
+ *var_vec_f32++ = vec_vsro(var_vec_f32[0], var_vec_u8[1]);
+ *var_vec_f32++ = vec_vsubfp(var_vec_f32[0], var_vec_f32[1]);
+}
+void f9() {
+ *var_vec_f32++ = vec_vxor(var_vec_b32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_vxor(var_vec_f32[0], var_vec_b32[1]);
+ *var_vec_f32++ = vec_vxor(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_xor(var_vec_b32[0], var_vec_f32[1]);
+ *var_vec_f32++ = vec_xor(var_vec_f32[0], var_vec_b32[1]);
+ *var_vec_f32++ = vec_xor(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_p16++ = vec_ld(var_int[0], var_vec_p16_ptr[1]);
+ *var_vec_p16++ = vec_ldl(var_int[0], var_vec_p16_ptr[1]);
+ *var_vec_p16++ = vec_lvx(var_int[0], var_vec_p16_ptr[1]);
+ *var_vec_p16++ = vec_lvxl(var_int[0], var_vec_p16_ptr[1]);
+ *var_vec_p16++ = vec_mergeh(var_vec_p16[0], var_vec_p16[1]);
+ *var_vec_p16++ = vec_mergel(var_vec_p16[0], var_vec_p16[1]);
+ *var_vec_p16++ = vec_packpx(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_p16++ = vec_perm(var_vec_p16[0], var_vec_p16[1], var_vec_u8[2]);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 0);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 1);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 2);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 3);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 4);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 5);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 6);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 7);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 8);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 9);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 10);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 11);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 12);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 13);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 14);
+ *var_vec_p16++ = vec_sld(var_vec_p16[0], var_vec_p16[1], 15);
+ *var_vec_p16++ = vec_sll(var_vec_p16[0], var_vec_u16[1]);
+ *var_vec_p16++ = vec_sll(var_vec_p16[0], var_vec_u32[1]);
+ *var_vec_p16++ = vec_sll(var_vec_p16[0], var_vec_u8[1]);
+ *var_vec_p16++ = vec_slo(var_vec_p16[0], var_vec_s8[1]);
+ *var_vec_p16++ = vec_slo(var_vec_p16[0], var_vec_u8[1]);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 0);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 1);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 2);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 3);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 4);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 5);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 6);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 7);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 8);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 9);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 10);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 11);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 12);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 13);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 14);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 15);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 16);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 17);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 18);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 19);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 20);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 21);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 22);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 23);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 24);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 25);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 26);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 27);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 28);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 29);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 30);
+ *var_vec_p16++ = vec_splat(var_vec_p16[0], 31);
+ *var_vec_p16++ = vec_srl(var_vec_p16[0], var_vec_u16[1]);
+ *var_vec_p16++ = vec_srl(var_vec_p16[0], var_vec_u32[1]);
+ *var_vec_p16++ = vec_srl(var_vec_p16[0], var_vec_u8[1]);
+ *var_vec_p16++ = vec_sro(var_vec_p16[0], var_vec_s8[1]);
+ *var_vec_p16++ = vec_sro(var_vec_p16[0], var_vec_u8[1]);
+ *var_vec_p16++ = vec_vmrghh(var_vec_p16[0], var_vec_p16[1]);
+ *var_vec_p16++ = vec_vmrglh(var_vec_p16[0], var_vec_p16[1]);
+ *var_vec_p16++ = vec_vperm(var_vec_p16[0], var_vec_p16[1], var_vec_u8[2]);
+ *var_vec_p16++ = vec_vpkpx(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_p16++ = vec_vsl(var_vec_p16[0], var_vec_u16[1]);
+ *var_vec_p16++ = vec_vsl(var_vec_p16[0], var_vec_u32[1]);
+ *var_vec_p16++ = vec_vsl(var_vec_p16[0], var_vec_u8[1]);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 0);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 1);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 2);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 3);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 4);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 5);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 6);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 7);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 8);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 9);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 10);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 11);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 12);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 13);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 14);
+ *var_vec_p16++ = vec_vsldoi(var_vec_p16[0], var_vec_p16[1], 15);
+ *var_vec_p16++ = vec_vslo(var_vec_p16[0], var_vec_s8[1]);
+ *var_vec_p16++ = vec_vslo(var_vec_p16[0], var_vec_u8[1]);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 0);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 1);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 2);
+}
+void f10() {
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 3);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 4);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 5);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 6);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 7);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 8);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 9);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 10);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 11);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 12);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 13);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 14);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 15);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 16);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 17);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 18);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 19);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 20);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 21);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 22);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 23);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 24);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 25);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 26);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 27);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 28);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 29);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 30);
+ *var_vec_p16++ = vec_vsplth(var_vec_p16[0], 31);
+ *var_vec_p16++ = vec_vsr(var_vec_p16[0], var_vec_u16[1]);
+ *var_vec_p16++ = vec_vsr(var_vec_p16[0], var_vec_u32[1]);
+ *var_vec_p16++ = vec_vsr(var_vec_p16[0], var_vec_u8[1]);
+ *var_vec_p16++ = vec_vsro(var_vec_p16[0], var_vec_s8[1]);
+ *var_vec_p16++ = vec_vsro(var_vec_p16[0], var_vec_u8[1]);
+ *var_vec_s16++ = vec_add(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_add(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_add(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_adds(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_adds(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_adds(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_and(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_and(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_and(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_andc(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_andc(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_andc(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_avg(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_ld(var_int[0], var_short_ptr[1]);
+ *var_vec_s16++ = vec_ld(var_int[0], var_vec_s16_ptr[1]);
+ *var_vec_s16++ = vec_lde(var_int[0], var_short_ptr[1]);
+ *var_vec_s16++ = vec_ldl(var_int[0], var_short_ptr[1]);
+ *var_vec_s16++ = vec_ldl(var_int[0], var_vec_s16_ptr[1]);
+ *var_vec_s16++ = vec_lvehx(var_int[0], var_short_ptr[1]);
+ *var_vec_s16++ = vec_lvx(var_int[0], var_short_ptr[1]);
+ *var_vec_s16++ = vec_lvx(var_int[0], var_vec_s16_ptr[1]);
+ *var_vec_s16++ = vec_lvxl(var_int[0], var_short_ptr[1]);
+ *var_vec_s16++ = vec_lvxl(var_int[0], var_vec_s16_ptr[1]);
+ *var_vec_s16++ = vec_madds(var_vec_s16[0], var_vec_s16[1], var_vec_s16[2]);
+ *var_vec_s16++ = vec_max(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_max(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_max(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_mergeh(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_mergel(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_min(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_min(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_min(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_mladd(var_vec_s16[0], var_vec_s16[1], var_vec_s16[2]);
+ *var_vec_s16++ = vec_mladd(var_vec_s16[0], var_vec_u16[1], var_vec_u16[2]);
+ *var_vec_s16++ = vec_mladd(var_vec_u16[0], var_vec_s16[1], var_vec_s16[2]);
+ *var_vec_s16++ = vec_mradds(var_vec_s16[0], var_vec_s16[1], var_vec_s16[2]);
+ *var_vec_s16++ = vec_mule(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s16++ = vec_mulo(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s16++ = vec_nor(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_or(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_or(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_or(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_pack(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s16++ = vec_packs(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s16++ = vec_perm(var_vec_s16[0], var_vec_s16[1], var_vec_u8[2]);
+ *var_vec_s16++ = vec_rl(var_vec_s16[0], var_vec_u16[1]);
+ *var_vec_s16++ = vec_sel(var_vec_s16[0], var_vec_s16[1], var_vec_b16[2]);
+ *var_vec_s16++ = vec_sel(var_vec_s16[0], var_vec_s16[1], var_vec_u16[2]);
+ *var_vec_s16++ = vec_sl(var_vec_s16[0], var_vec_u16[1]);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 0);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 1);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 2);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 3);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 4);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 5);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 6);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 7);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 8);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 9);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 10);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 11);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 12);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 13);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 14);
+ *var_vec_s16++ = vec_sld(var_vec_s16[0], var_vec_s16[1], 15);
+ *var_vec_s16++ = vec_sll(var_vec_s16[0], var_vec_u16[1]);
+}
+void f11() {
+ *var_vec_s16++ = vec_sll(var_vec_s16[0], var_vec_u32[1]);
+ *var_vec_s16++ = vec_sll(var_vec_s16[0], var_vec_u8[1]);
+ *var_vec_s16++ = vec_slo(var_vec_s16[0], var_vec_s8[1]);
+ *var_vec_s16++ = vec_slo(var_vec_s16[0], var_vec_u8[1]);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 0);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 1);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 2);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 3);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 4);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 5);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 6);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 7);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 8);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 9);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 10);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 11);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 12);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 13);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 14);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 15);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 16);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 17);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 18);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 19);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 20);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 21);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 22);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 23);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 24);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 25);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 26);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 27);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 28);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 29);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 30);
+ *var_vec_s16++ = vec_splat(var_vec_s16[0], 31);
+ *var_vec_s16++ = vec_splat_s16( 0);
+ *var_vec_s16++ = vec_splat_s16( 1);
+ *var_vec_s16++ = vec_splat_s16( 2);
+ *var_vec_s16++ = vec_splat_s16( 3);
+ *var_vec_s16++ = vec_splat_s16( 4);
+ *var_vec_s16++ = vec_splat_s16( 5);
+ *var_vec_s16++ = vec_splat_s16( 6);
+ *var_vec_s16++ = vec_splat_s16( 7);
+ *var_vec_s16++ = vec_splat_s16( 8);
+ *var_vec_s16++ = vec_splat_s16( 9);
+ *var_vec_s16++ = vec_splat_s16( -1);
+ *var_vec_s16++ = vec_splat_s16( -2);
+ *var_vec_s16++ = vec_splat_s16( -3);
+ *var_vec_s16++ = vec_splat_s16( -4);
+ *var_vec_s16++ = vec_splat_s16( -5);
+ *var_vec_s16++ = vec_splat_s16( -6);
+ *var_vec_s16++ = vec_splat_s16( -7);
+ *var_vec_s16++ = vec_splat_s16( -8);
+ *var_vec_s16++ = vec_splat_s16( -9);
+ *var_vec_s16++ = vec_splat_s16( 10);
+ *var_vec_s16++ = vec_splat_s16( 11);
+ *var_vec_s16++ = vec_splat_s16( 12);
+ *var_vec_s16++ = vec_splat_s16( 13);
+ *var_vec_s16++ = vec_splat_s16( 14);
+ *var_vec_s16++ = vec_splat_s16( 15);
+ *var_vec_s16++ = vec_splat_s16(-10);
+ *var_vec_s16++ = vec_splat_s16(-11);
+ *var_vec_s16++ = vec_splat_s16(-12);
+ *var_vec_s16++ = vec_splat_s16(-13);
+ *var_vec_s16++ = vec_splat_s16(-14);
+ *var_vec_s16++ = vec_splat_s16(-15);
+ *var_vec_s16++ = vec_splat_s16(-16);
+ *var_vec_s16++ = vec_sr(var_vec_s16[0], var_vec_u16[1]);
+ *var_vec_s16++ = vec_sra(var_vec_s16[0], var_vec_u16[1]);
+ *var_vec_s16++ = vec_srl(var_vec_s16[0], var_vec_u16[1]);
+ *var_vec_s16++ = vec_srl(var_vec_s16[0], var_vec_u32[1]);
+ *var_vec_s16++ = vec_srl(var_vec_s16[0], var_vec_u8[1]);
+ *var_vec_s16++ = vec_sro(var_vec_s16[0], var_vec_s8[1]);
+ *var_vec_s16++ = vec_sro(var_vec_s16[0], var_vec_u8[1]);
+ *var_vec_s16++ = vec_sub(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_sub(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_sub(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_subs(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_subs(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_subs(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_unpackh(var_vec_s8[0]);
+ *var_vec_s16++ = vec_unpackl(var_vec_s8[0]);
+ *var_vec_s16++ = vec_vaddshs(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vaddshs(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_vaddshs(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vadduhm(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vadduhm(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_vadduhm(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vand(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vand(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_vand(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vandc(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vandc(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_vandc(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vavgsh(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vmaxsh(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vmaxsh(var_vec_s16[0], var_vec_b16[1]);
+}
+void f12() {
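+ /* Specific vec_v* builtins yielding vector signed short; vec_vsldoi,
+    vec_vsplth, and vec_vspltish are driven through literal operands. */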
+ *var_vec_s16++ = vec_vmaxsh(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vmhaddshs(var_vec_s16[0], var_vec_s16[1], var_vec_s16[2]);
+ *var_vec_s16++ = vec_vmhraddshs(var_vec_s16[0], var_vec_s16[1], var_vec_s16[2]);
+ *var_vec_s16++ = vec_vminsh(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vminsh(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_vminsh(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vmladduhm(var_vec_s16[0], var_vec_s16[1], var_vec_s16[2]);
+ *var_vec_s16++ = vec_vmladduhm(var_vec_s16[0], var_vec_u16[1], var_vec_u16[2]);
+ *var_vec_s16++ = vec_vmladduhm(var_vec_u16[0], var_vec_s16[1], var_vec_s16[2]);
+ *var_vec_s16++ = vec_vmrghh(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vmrglh(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vmulesb(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s16++ = vec_vmulosb(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s16++ = vec_vnor(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vor(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vor(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_vor(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vperm(var_vec_s16[0], var_vec_s16[1], var_vec_u8[2]);
+ *var_vec_s16++ = vec_vpkswss(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s16++ = vec_vpkuwum(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s16++ = vec_vrlh(var_vec_s16[0], var_vec_u16[1]);
+ *var_vec_s16++ = vec_vsel(var_vec_s16[0], var_vec_s16[1], var_vec_b16[2]);
+ *var_vec_s16++ = vec_vsel(var_vec_s16[0], var_vec_s16[1], var_vec_u16[2]);
+ *var_vec_s16++ = vec_vsl(var_vec_s16[0], var_vec_u16[1]);
+ *var_vec_s16++ = vec_vsl(var_vec_s16[0], var_vec_u32[1]);
+ *var_vec_s16++ = vec_vsl(var_vec_s16[0], var_vec_u8[1]);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 0);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 1);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 2);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 3);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 4);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 5);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 6);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 7);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 8);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 9);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 10);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 11);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 12);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 13);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 14);
+ *var_vec_s16++ = vec_vsldoi(var_vec_s16[0], var_vec_s16[1], 15);
+ *var_vec_s16++ = vec_vslh(var_vec_s16[0], var_vec_u16[1]);
+ *var_vec_s16++ = vec_vslo(var_vec_s16[0], var_vec_s8[1]);
+ *var_vec_s16++ = vec_vslo(var_vec_s16[0], var_vec_u8[1]);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 0);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 1);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 2);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 3);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 4);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 5);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 6);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 7);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 8);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 9);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 10);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 11);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 12);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 13);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 14);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 15);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 16);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 17);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 18);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 19);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 20);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 21);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 22);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 23);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 24);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 25);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 26);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 27);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 28);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 29);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 30);
+ *var_vec_s16++ = vec_vsplth(var_vec_s16[0], 31);
+ *var_vec_s16++ = vec_vspltish( 0);
+ *var_vec_s16++ = vec_vspltish( 1);
+ *var_vec_s16++ = vec_vspltish( 2);
+ *var_vec_s16++ = vec_vspltish( 3);
+ *var_vec_s16++ = vec_vspltish( 4);
+ *var_vec_s16++ = vec_vspltish( 5);
+ *var_vec_s16++ = vec_vspltish( 6);
+ *var_vec_s16++ = vec_vspltish( 7);
+ *var_vec_s16++ = vec_vspltish( 8);
+ *var_vec_s16++ = vec_vspltish( 9);
+ *var_vec_s16++ = vec_vspltish( -1);
+ *var_vec_s16++ = vec_vspltish( -2);
+ *var_vec_s16++ = vec_vspltish( -3);
+ *var_vec_s16++ = vec_vspltish( -4);
+ *var_vec_s16++ = vec_vspltish( -5);
+ *var_vec_s16++ = vec_vspltish( -6);
+ *var_vec_s16++ = vec_vspltish( -7);
+ *var_vec_s16++ = vec_vspltish( -8);
+ *var_vec_s16++ = vec_vspltish( -9);
+ *var_vec_s16++ = vec_vspltish( 10);
+ *var_vec_s16++ = vec_vspltish( 11);
+ *var_vec_s16++ = vec_vspltish( 12);
+ *var_vec_s16++ = vec_vspltish( 13);
+}
+void f13() {
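+ /* Remaining vec_vspltish literals plus shift, subtract, unpack, and xor
+    forms for vector signed short, then the generic builtins that yield
+    vector signed int, with vec_cts at every 5-bit scale factor. */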
+ *var_vec_s16++ = vec_vspltish( 14);
+ *var_vec_s16++ = vec_vspltish( 15);
+ *var_vec_s16++ = vec_vspltish(-10);
+ *var_vec_s16++ = vec_vspltish(-11);
+ *var_vec_s16++ = vec_vspltish(-12);
+ *var_vec_s16++ = vec_vspltish(-13);
+ *var_vec_s16++ = vec_vspltish(-14);
+ *var_vec_s16++ = vec_vspltish(-15);
+ *var_vec_s16++ = vec_vspltish(-16);
+ *var_vec_s16++ = vec_vsr(var_vec_s16[0], var_vec_u16[1]);
+ *var_vec_s16++ = vec_vsr(var_vec_s16[0], var_vec_u32[1]);
+ *var_vec_s16++ = vec_vsr(var_vec_s16[0], var_vec_u8[1]);
+ *var_vec_s16++ = vec_vsrah(var_vec_s16[0], var_vec_u16[1]);
+ *var_vec_s16++ = vec_vsrh(var_vec_s16[0], var_vec_u16[1]);
+ *var_vec_s16++ = vec_vsro(var_vec_s16[0], var_vec_s8[1]);
+ *var_vec_s16++ = vec_vsro(var_vec_s16[0], var_vec_u8[1]);
+ *var_vec_s16++ = vec_vsubshs(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vsubshs(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_vsubshs(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vsubuhm(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vsubuhm(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_vsubuhm(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vupkhsb(var_vec_s8[0]);
+ *var_vec_s16++ = vec_vupklsb(var_vec_s8[0]);
+ *var_vec_s16++ = vec_vxor(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_vxor(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_vxor(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_xor(var_vec_b16[0], var_vec_s16[1]);
+ *var_vec_s16++ = vec_xor(var_vec_s16[0], var_vec_b16[1]);
+ *var_vec_s16++ = vec_xor(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s32++ = vec_add(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_add(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_add(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_adds(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_adds(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_adds(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_and(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_and(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_and(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_andc(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_andc(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_andc(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_avg(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_cmpb(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 0);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 1);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 2);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 3);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 4);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 5);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 6);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 7);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 8);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 9);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 10);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 11);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 12);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 13);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 14);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 15);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 16);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 17);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 18);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 19);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 20);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 21);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 22);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 23);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 24);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 25);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 26);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 27);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 28);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 29);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 30);
+ *var_vec_s32++ = vec_cts(var_vec_f32[0], 31);
+ *var_vec_s32++ = vec_ld(var_int[0], var_int_ptr[1]);
+ *var_vec_s32++ = vec_ld(var_int[0], var_vec_s32_ptr[1]);
+ *var_vec_s32++ = vec_lde(var_int[0], var_int_ptr[1]);
+ *var_vec_s32++ = vec_ldl(var_int[0], var_int_ptr[1]);
+ *var_vec_s32++ = vec_ldl(var_int[0], var_vec_s32_ptr[1]);
+ *var_vec_s32++ = vec_lvewx(var_int[0], var_int_ptr[1]);
+ *var_vec_s32++ = vec_lvx(var_int[0], var_int_ptr[1]);
+ *var_vec_s32++ = vec_lvx(var_int[0], var_vec_s32_ptr[1]);
+ *var_vec_s32++ = vec_lvxl(var_int[0], var_int_ptr[1]);
+ *var_vec_s32++ = vec_lvxl(var_int[0], var_vec_s32_ptr[1]);
+ *var_vec_s32++ = vec_max(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_max(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_max(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_mergeh(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_mergel(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_min(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_min(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_min(var_vec_s32[0], var_vec_s32[1]);
+}
+void f14() {
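+ /* Generic multiply-sum, logical, permute, shift, and splat builtins on
+    vector signed int; vec_sld, vec_splat, and vec_splat_s32 cover their
+    full literal ranges. */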
+ *var_vec_s32++ = vec_msum(var_vec_s16[0], var_vec_s16[1], var_vec_s32[2]);
+ *var_vec_s32++ = vec_msum(var_vec_s8[0], var_vec_u8[1], var_vec_s32[2]);
+ *var_vec_s32++ = vec_msums(var_vec_s16[0], var_vec_s16[1], var_vec_s32[2]);
+ *var_vec_s32++ = vec_mule(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s32++ = vec_mulo(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s32++ = vec_nor(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_or(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_or(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_or(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_perm(var_vec_s32[0], var_vec_s32[1], var_vec_u8[2]);
+ *var_vec_s32++ = vec_rl(var_vec_s32[0], var_vec_u32[1]);
+ *var_vec_s32++ = vec_sel(var_vec_s32[0], var_vec_s32[1], var_vec_b32[2]);
+ *var_vec_s32++ = vec_sel(var_vec_s32[0], var_vec_s32[1], var_vec_u32[2]);
+ *var_vec_s32++ = vec_sl(var_vec_s32[0], var_vec_u32[1]);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 0);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 1);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 2);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 3);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 4);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 5);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 6);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 7);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 8);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 9);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 10);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 11);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 12);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 13);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 14);
+ *var_vec_s32++ = vec_sld(var_vec_s32[0], var_vec_s32[1], 15);
+ *var_vec_s32++ = vec_sll(var_vec_s32[0], var_vec_u16[1]);
+ *var_vec_s32++ = vec_sll(var_vec_s32[0], var_vec_u32[1]);
+ *var_vec_s32++ = vec_sll(var_vec_s32[0], var_vec_u8[1]);
+ *var_vec_s32++ = vec_slo(var_vec_s32[0], var_vec_s8[1]);
+ *var_vec_s32++ = vec_slo(var_vec_s32[0], var_vec_u8[1]);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 0);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 1);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 2);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 3);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 4);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 5);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 6);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 7);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 8);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 9);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 10);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 11);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 12);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 13);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 14);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 15);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 16);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 17);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 18);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 19);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 20);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 21);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 22);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 23);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 24);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 25);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 26);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 27);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 28);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 29);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 30);
+ *var_vec_s32++ = vec_splat(var_vec_s32[0], 31);
+ *var_vec_s32++ = vec_splat_s32( 0);
+ *var_vec_s32++ = vec_splat_s32( 1);
+ *var_vec_s32++ = vec_splat_s32( 2);
+ *var_vec_s32++ = vec_splat_s32( 3);
+ *var_vec_s32++ = vec_splat_s32( 4);
+ *var_vec_s32++ = vec_splat_s32( 5);
+ *var_vec_s32++ = vec_splat_s32( 6);
+ *var_vec_s32++ = vec_splat_s32( 7);
+ *var_vec_s32++ = vec_splat_s32( 8);
+ *var_vec_s32++ = vec_splat_s32( 9);
+ *var_vec_s32++ = vec_splat_s32( -1);
+ *var_vec_s32++ = vec_splat_s32( -2);
+ *var_vec_s32++ = vec_splat_s32( -3);
+ *var_vec_s32++ = vec_splat_s32( -4);
+ *var_vec_s32++ = vec_splat_s32( -5);
+ *var_vec_s32++ = vec_splat_s32( -6);
+ *var_vec_s32++ = vec_splat_s32( -7);
+ *var_vec_s32++ = vec_splat_s32( -8);
+ *var_vec_s32++ = vec_splat_s32( -9);
+ *var_vec_s32++ = vec_splat_s32( 10);
+ *var_vec_s32++ = vec_splat_s32( 11);
+ *var_vec_s32++ = vec_splat_s32( 12);
+ *var_vec_s32++ = vec_splat_s32( 13);
+ *var_vec_s32++ = vec_splat_s32( 14);
+ *var_vec_s32++ = vec_splat_s32( 15);
+ *var_vec_s32++ = vec_splat_s32(-10);
+ *var_vec_s32++ = vec_splat_s32(-11);
+ *var_vec_s32++ = vec_splat_s32(-12);
+ *var_vec_s32++ = vec_splat_s32(-13);
+ *var_vec_s32++ = vec_splat_s32(-14);
+ *var_vec_s32++ = vec_splat_s32(-15);
+ *var_vec_s32++ = vec_splat_s32(-16);
+ *var_vec_s32++ = vec_sr(var_vec_s32[0], var_vec_u32[1]);
+}
+void f15() {
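+ /* Shift-right, subtract, and sum-across forms for vector signed int,
+    followed by their specific vec_v* counterparts; vec_vctsxs is
+    exercised at every 5-bit scale factor. */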
+ *var_vec_s32++ = vec_sra(var_vec_s32[0], var_vec_u32[1]);
+ *var_vec_s32++ = vec_srl(var_vec_s32[0], var_vec_u16[1]);
+ *var_vec_s32++ = vec_srl(var_vec_s32[0], var_vec_u32[1]);
+ *var_vec_s32++ = vec_srl(var_vec_s32[0], var_vec_u8[1]);
+ *var_vec_s32++ = vec_sro(var_vec_s32[0], var_vec_s8[1]);
+ *var_vec_s32++ = vec_sro(var_vec_s32[0], var_vec_u8[1]);
+ *var_vec_s32++ = vec_sub(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_sub(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_sub(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_subs(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_subs(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_subs(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_sum2s(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_sum4s(var_vec_s16[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_sum4s(var_vec_s8[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_sums(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_unpackh(var_vec_s16[0]);
+ *var_vec_s32++ = vec_unpackl(var_vec_s16[0]);
+ *var_vec_s32++ = vec_vaddsws(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vaddsws(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_vaddsws(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vadduwm(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vadduwm(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_vadduwm(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vand(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vand(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_vand(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vandc(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vandc(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_vandc(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vavgsw(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vcmpbfp(var_vec_f32[0], var_vec_f32[1]);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 0);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 1);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 2);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 3);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 4);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 5);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 6);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 7);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 8);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 9);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 10);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 11);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 12);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 13);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 14);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 15);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 16);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 17);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 18);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 19);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 20);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 21);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 22);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 23);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 24);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 25);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 26);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 27);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 28);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 29);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 30);
+ *var_vec_s32++ = vec_vctsxs(var_vec_f32[0], 31);
+ *var_vec_s32++ = vec_vmaxsw(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vmaxsw(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_vmaxsw(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vminsw(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vminsw(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_vminsw(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vmrghw(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vmrglw(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vmsummbm(var_vec_s8[0], var_vec_u8[1], var_vec_s32[2]);
+ *var_vec_s32++ = vec_vmsumshm(var_vec_s16[0], var_vec_s16[1], var_vec_s32[2]);
+ *var_vec_s32++ = vec_vmsumshs(var_vec_s16[0], var_vec_s16[1], var_vec_s32[2]);
+ *var_vec_s32++ = vec_vmulesh(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s32++ = vec_vmulosh(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s32++ = vec_vnor(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vor(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vor(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_vor(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vperm(var_vec_s32[0], var_vec_s32[1], var_vec_u8[2]);
+ *var_vec_s32++ = vec_vrlw(var_vec_s32[0], var_vec_u32[1]);
+ *var_vec_s32++ = vec_vsel(var_vec_s32[0], var_vec_s32[1], var_vec_b32[2]);
+ *var_vec_s32++ = vec_vsel(var_vec_s32[0], var_vec_s32[1], var_vec_u32[2]);
+ *var_vec_s32++ = vec_vsl(var_vec_s32[0], var_vec_u16[1]);
+ *var_vec_s32++ = vec_vsl(var_vec_s32[0], var_vec_u32[1]);
+ *var_vec_s32++ = vec_vsl(var_vec_s32[0], var_vec_u8[1]);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 0);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 1);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 2);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 3);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 4);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 5);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 6);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 7);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 8);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 9);
+}
+void f16() {
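+ /* Remaining specific builtins for vector signed int (slide, splat,
+    shift, subtract, sum, unpack, xor), then the first vec_add overloads
+    for vector signed char. */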
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 10);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 11);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 12);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 13);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 14);
+ *var_vec_s32++ = vec_vsldoi(var_vec_s32[0], var_vec_s32[1], 15);
+ *var_vec_s32++ = vec_vslo(var_vec_s32[0], var_vec_s8[1]);
+ *var_vec_s32++ = vec_vslo(var_vec_s32[0], var_vec_u8[1]);
+ *var_vec_s32++ = vec_vslw(var_vec_s32[0], var_vec_u32[1]);
+ *var_vec_s32++ = vec_vspltisw( 0);
+ *var_vec_s32++ = vec_vspltisw( 1);
+ *var_vec_s32++ = vec_vspltisw( 2);
+ *var_vec_s32++ = vec_vspltisw( 3);
+ *var_vec_s32++ = vec_vspltisw( 4);
+ *var_vec_s32++ = vec_vspltisw( 5);
+ *var_vec_s32++ = vec_vspltisw( 6);
+ *var_vec_s32++ = vec_vspltisw( 7);
+ *var_vec_s32++ = vec_vspltisw( 8);
+ *var_vec_s32++ = vec_vspltisw( 9);
+ *var_vec_s32++ = vec_vspltisw( -1);
+ *var_vec_s32++ = vec_vspltisw( -2);
+ *var_vec_s32++ = vec_vspltisw( -3);
+ *var_vec_s32++ = vec_vspltisw( -4);
+ *var_vec_s32++ = vec_vspltisw( -5);
+ *var_vec_s32++ = vec_vspltisw( -6);
+ *var_vec_s32++ = vec_vspltisw( -7);
+ *var_vec_s32++ = vec_vspltisw( -8);
+ *var_vec_s32++ = vec_vspltisw( -9);
+ *var_vec_s32++ = vec_vspltisw( 10);
+ *var_vec_s32++ = vec_vspltisw( 11);
+ *var_vec_s32++ = vec_vspltisw( 12);
+ *var_vec_s32++ = vec_vspltisw( 13);
+ *var_vec_s32++ = vec_vspltisw( 14);
+ *var_vec_s32++ = vec_vspltisw( 15);
+ *var_vec_s32++ = vec_vspltisw(-10);
+ *var_vec_s32++ = vec_vspltisw(-11);
+ *var_vec_s32++ = vec_vspltisw(-12);
+ *var_vec_s32++ = vec_vspltisw(-13);
+ *var_vec_s32++ = vec_vspltisw(-14);
+ *var_vec_s32++ = vec_vspltisw(-15);
+ *var_vec_s32++ = vec_vspltisw(-16);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 0);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 1);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 2);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 3);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 4);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 5);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 6);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 7);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 8);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 9);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 10);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 11);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 12);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 13);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 14);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 15);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 16);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 17);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 18);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 19);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 20);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 21);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 22);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 23);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 24);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 25);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 26);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 27);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 28);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 29);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 30);
+ *var_vec_s32++ = vec_vspltw(var_vec_s32[0], 31);
+ *var_vec_s32++ = vec_vsr(var_vec_s32[0], var_vec_u16[1]);
+ *var_vec_s32++ = vec_vsr(var_vec_s32[0], var_vec_u32[1]);
+ *var_vec_s32++ = vec_vsr(var_vec_s32[0], var_vec_u8[1]);
+ *var_vec_s32++ = vec_vsraw(var_vec_s32[0], var_vec_u32[1]);
+ *var_vec_s32++ = vec_vsro(var_vec_s32[0], var_vec_s8[1]);
+ *var_vec_s32++ = vec_vsro(var_vec_s32[0], var_vec_u8[1]);
+ *var_vec_s32++ = vec_vsrw(var_vec_s32[0], var_vec_u32[1]);
+ *var_vec_s32++ = vec_vsubsws(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vsubsws(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_vsubsws(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vsubuwm(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vsubuwm(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_vsubuwm(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vsum2sws(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vsum4sbs(var_vec_s8[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vsum4shs(var_vec_s16[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vsumsws(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vupkhsh(var_vec_s16[0]);
+ *var_vec_s32++ = vec_vupklsh(var_vec_s16[0]);
+ *var_vec_s32++ = vec_vxor(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_vxor(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_vxor(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_xor(var_vec_b32[0], var_vec_s32[1]);
+ *var_vec_s32++ = vec_xor(var_vec_s32[0], var_vec_b32[1]);
+ *var_vec_s32++ = vec_xor(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_s8++ = vec_add(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_add(var_vec_s8[0], var_vec_b8[1]);
+}
+void f17() {
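+ /* Generic arithmetic, load, logical, pack, permute, shift, and splat
+    builtins yielding vector signed char. */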
+ *var_vec_s8++ = vec_add(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_adds(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_adds(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_adds(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_and(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_and(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_and(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_andc(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_andc(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_andc(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_avg(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_ld(var_int[0], var_signed_char_ptr[1]);
+ *var_vec_s8++ = vec_ld(var_int[0], var_vec_s8_ptr[1]);
+ *var_vec_s8++ = vec_lde(var_int[0], var_signed_char_ptr[1]);
+ *var_vec_s8++ = vec_ldl(var_int[0], var_signed_char_ptr[1]);
+ *var_vec_s8++ = vec_ldl(var_int[0], var_vec_s8_ptr[1]);
+ *var_vec_s8++ = vec_lvebx(var_int[0], var_signed_char_ptr[1]);
+ *var_vec_s8++ = vec_lvx(var_int[0], var_signed_char_ptr[1]);
+ *var_vec_s8++ = vec_lvx(var_int[0], var_vec_s8_ptr[1]);
+ *var_vec_s8++ = vec_lvxl(var_int[0], var_signed_char_ptr[1]);
+ *var_vec_s8++ = vec_lvxl(var_int[0], var_vec_s8_ptr[1]);
+ *var_vec_s8++ = vec_max(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_max(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_max(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_mergeh(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_mergel(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_min(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_min(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_min(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_nor(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_or(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_or(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_or(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_pack(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s8++ = vec_packs(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s8++ = vec_perm(var_vec_s8[0], var_vec_s8[1], var_vec_u8[2]);
+ *var_vec_s8++ = vec_rl(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_sel(var_vec_s8[0], var_vec_s8[1], var_vec_b8[2]);
+ *var_vec_s8++ = vec_sel(var_vec_s8[0], var_vec_s8[1], var_vec_u8[2]);
+ *var_vec_s8++ = vec_sl(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 0);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 1);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 2);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 3);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 4);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 5);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 6);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 7);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 8);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 9);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 10);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 11);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 12);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 13);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 14);
+ *var_vec_s8++ = vec_sld(var_vec_s8[0], var_vec_s8[1], 15);
+ *var_vec_s8++ = vec_sll(var_vec_s8[0], var_vec_u16[1]);
+ *var_vec_s8++ = vec_sll(var_vec_s8[0], var_vec_u32[1]);
+ *var_vec_s8++ = vec_sll(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_slo(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_slo(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 0);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 1);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 2);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 3);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 4);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 5);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 6);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 7);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 8);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 9);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 10);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 11);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 12);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 13);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 14);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 15);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 16);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 17);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 18);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 19);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 20);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 21);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 22);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 23);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 24);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 25);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 26);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 27);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 28);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 29);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 30);
+ *var_vec_s8++ = vec_splat(var_vec_s8[0], 31);
+ *var_vec_s8++ = vec_splat_s8( 0);
+ *var_vec_s8++ = vec_splat_s8( 1);
+ *var_vec_s8++ = vec_splat_s8( 2);
+ *var_vec_s8++ = vec_splat_s8( 3);
+ *var_vec_s8++ = vec_splat_s8( 4);
+ *var_vec_s8++ = vec_splat_s8( 5);
+ *var_vec_s8++ = vec_splat_s8( 6);
+}
+void f18() {
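+ /* Remaining vec_splat_s8 literals, generic shifts and subtracts, and
+    the specific vec_v* builtins for vector signed char. */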
+ *var_vec_s8++ = vec_splat_s8( 7);
+ *var_vec_s8++ = vec_splat_s8( 8);
+ *var_vec_s8++ = vec_splat_s8( 9);
+ *var_vec_s8++ = vec_splat_s8( -1);
+ *var_vec_s8++ = vec_splat_s8( -2);
+ *var_vec_s8++ = vec_splat_s8( -3);
+ *var_vec_s8++ = vec_splat_s8( -4);
+ *var_vec_s8++ = vec_splat_s8( -5);
+ *var_vec_s8++ = vec_splat_s8( -6);
+ *var_vec_s8++ = vec_splat_s8( -7);
+ *var_vec_s8++ = vec_splat_s8( -8);
+ *var_vec_s8++ = vec_splat_s8( -9);
+ *var_vec_s8++ = vec_splat_s8( 10);
+ *var_vec_s8++ = vec_splat_s8( 11);
+ *var_vec_s8++ = vec_splat_s8( 12);
+ *var_vec_s8++ = vec_splat_s8( 13);
+ *var_vec_s8++ = vec_splat_s8( 14);
+ *var_vec_s8++ = vec_splat_s8( 15);
+ *var_vec_s8++ = vec_splat_s8(-10);
+ *var_vec_s8++ = vec_splat_s8(-11);
+ *var_vec_s8++ = vec_splat_s8(-12);
+ *var_vec_s8++ = vec_splat_s8(-13);
+ *var_vec_s8++ = vec_splat_s8(-14);
+ *var_vec_s8++ = vec_splat_s8(-15);
+ *var_vec_s8++ = vec_splat_s8(-16);
+ *var_vec_s8++ = vec_sr(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_sra(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_srl(var_vec_s8[0], var_vec_u16[1]);
+ *var_vec_s8++ = vec_srl(var_vec_s8[0], var_vec_u32[1]);
+ *var_vec_s8++ = vec_srl(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_sro(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_sro(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_sub(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_sub(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_sub(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_subs(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_subs(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_subs(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vaddsbs(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vaddsbs(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_vaddsbs(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vaddubm(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vaddubm(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_vaddubm(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vand(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vand(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_vand(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vandc(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vandc(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_vandc(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vavgsb(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vmaxsb(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vmaxsb(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_vmaxsb(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vminsb(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vminsb(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_vminsb(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vmrghb(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vmrglb(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vnor(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vor(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vor(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_vor(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vperm(var_vec_s8[0], var_vec_s8[1], var_vec_u8[2]);
+ *var_vec_s8++ = vec_vpkshss(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s8++ = vec_vpkuhum(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_s8++ = vec_vrlb(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_vsel(var_vec_s8[0], var_vec_s8[1], var_vec_b8[2]);
+ *var_vec_s8++ = vec_vsel(var_vec_s8[0], var_vec_s8[1], var_vec_u8[2]);
+ *var_vec_s8++ = vec_vsl(var_vec_s8[0], var_vec_u16[1]);
+ *var_vec_s8++ = vec_vsl(var_vec_s8[0], var_vec_u32[1]);
+ *var_vec_s8++ = vec_vsl(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_vslb(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 0);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 1);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 2);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 3);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 4);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 5);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 6);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 7);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 8);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 9);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 10);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 11);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 12);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 13);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 14);
+ *var_vec_s8++ = vec_vsldoi(var_vec_s8[0], var_vec_s8[1], 15);
+ *var_vec_s8++ = vec_vslo(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vslo(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 0);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 1);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 2);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 3);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 4);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 5);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 6);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 7);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 8);
+}
+void f19() {
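+ /* Remaining splat, shift, subtract, and xor forms for vector signed
+    char, then the generic builtins that begin the vector unsigned short
+    coverage. */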
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 9);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 10);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 11);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 12);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 13);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 14);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 15);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 16);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 17);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 18);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 19);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 20);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 21);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 22);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 23);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 24);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 25);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 26);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 27);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 28);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 29);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 30);
+ *var_vec_s8++ = vec_vspltb(var_vec_s8[0], 31);
+ *var_vec_s8++ = vec_vspltisb( 0);
+ *var_vec_s8++ = vec_vspltisb( 1);
+ *var_vec_s8++ = vec_vspltisb( 2);
+ *var_vec_s8++ = vec_vspltisb( 3);
+ *var_vec_s8++ = vec_vspltisb( 4);
+ *var_vec_s8++ = vec_vspltisb( 5);
+ *var_vec_s8++ = vec_vspltisb( 6);
+ *var_vec_s8++ = vec_vspltisb( 7);
+ *var_vec_s8++ = vec_vspltisb( 8);
+ *var_vec_s8++ = vec_vspltisb( 9);
+ *var_vec_s8++ = vec_vspltisb( -1);
+ *var_vec_s8++ = vec_vspltisb( -2);
+ *var_vec_s8++ = vec_vspltisb( -3);
+ *var_vec_s8++ = vec_vspltisb( -4);
+ *var_vec_s8++ = vec_vspltisb( -5);
+ *var_vec_s8++ = vec_vspltisb( -6);
+ *var_vec_s8++ = vec_vspltisb( -7);
+ *var_vec_s8++ = vec_vspltisb( -8);
+ *var_vec_s8++ = vec_vspltisb( -9);
+ *var_vec_s8++ = vec_vspltisb( 10);
+ *var_vec_s8++ = vec_vspltisb( 11);
+ *var_vec_s8++ = vec_vspltisb( 12);
+ *var_vec_s8++ = vec_vspltisb( 13);
+ *var_vec_s8++ = vec_vspltisb( 14);
+ *var_vec_s8++ = vec_vspltisb( 15);
+ *var_vec_s8++ = vec_vspltisb(-10);
+ *var_vec_s8++ = vec_vspltisb(-11);
+ *var_vec_s8++ = vec_vspltisb(-12);
+ *var_vec_s8++ = vec_vspltisb(-13);
+ *var_vec_s8++ = vec_vspltisb(-14);
+ *var_vec_s8++ = vec_vspltisb(-15);
+ *var_vec_s8++ = vec_vspltisb(-16);
+ *var_vec_s8++ = vec_vsr(var_vec_s8[0], var_vec_u16[1]);
+ *var_vec_s8++ = vec_vsr(var_vec_s8[0], var_vec_u32[1]);
+ *var_vec_s8++ = vec_vsr(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_vsrab(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_vsrb(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_vsro(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vsro(var_vec_s8[0], var_vec_u8[1]);
+ *var_vec_s8++ = vec_vsubsbs(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vsubsbs(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_vsubsbs(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vsububm(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vsububm(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_vsububm(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vxor(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_vxor(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_vxor(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_xor(var_vec_b8[0], var_vec_s8[1]);
+ *var_vec_s8++ = vec_xor(var_vec_s8[0], var_vec_b8[1]);
+ *var_vec_s8++ = vec_xor(var_vec_s8[0], var_vec_s8[1]);
+ *var_vec_u16++ = vec_add(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_add(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_add(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_adds(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_adds(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_adds(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_and(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_and(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_and(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_andc(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_andc(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_andc(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_avg(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_ld(var_int[0], var_unsigned_short_ptr[1]);
+ *var_vec_u16++ = vec_ld(var_int[0], var_vec_u16_ptr[1]);
+ *var_vec_u16++ = vec_lde(var_int[0], var_unsigned_short_ptr[1]);
+ *var_vec_u16++ = vec_ldl(var_int[0], var_unsigned_short_ptr[1]);
+ *var_vec_u16++ = vec_ldl(var_int[0], var_vec_u16_ptr[1]);
+ *var_vec_u16++ = vec_lvehx(var_int[0], var_unsigned_short_ptr[1]);
+ *var_vec_u16++ = vec_lvx(var_int[0], var_unsigned_short_ptr[1]);
+ *var_vec_u16++ = vec_lvx(var_int[0], var_vec_u16_ptr[1]);
+ *var_vec_u16++ = vec_lvxl(var_int[0], var_unsigned_short_ptr[1]);
+ *var_vec_u16++ = vec_lvxl(var_int[0], var_vec_u16_ptr[1]);
+ *var_vec_u16++ = vec_max(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_max(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_max(var_vec_u16[0], var_vec_u16[1]);
+}
+void f20() {
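+ /* Merge, multiply, pack, permute, shift, and splat builtins yielding
+    vector unsigned short; vec_splat_u16 takes its 5-bit literals,
+    including the negative encodings. */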
+ *var_vec_u16++ = vec_mergeh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_mergel(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_min(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_min(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_min(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_mladd(var_vec_u16[0], var_vec_u16[1], var_vec_u16[2]);
+ *var_vec_u16++ = vec_mule(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_mulo(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_nor(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_or(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_or(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_or(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_pack(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u16++ = vec_packs(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u16++ = vec_packsu(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_u16++ = vec_packsu(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u16++ = vec_perm(var_vec_u16[0], var_vec_u16[1], var_vec_u8[2]);
+ *var_vec_u16++ = vec_rl(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_sel(var_vec_u16[0], var_vec_u16[1], var_vec_b16[2]);
+ *var_vec_u16++ = vec_sel(var_vec_u16[0], var_vec_u16[1], var_vec_u16[2]);
+ *var_vec_u16++ = vec_sl(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 0);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 1);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 2);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 3);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 4);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 5);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 6);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 7);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 8);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 9);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 10);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 11);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 12);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 13);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 14);
+ *var_vec_u16++ = vec_sld(var_vec_u16[0], var_vec_u16[1], 15);
+ *var_vec_u16++ = vec_sll(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_sll(var_vec_u16[0], var_vec_u32[1]);
+ *var_vec_u16++ = vec_sll(var_vec_u16[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_slo(var_vec_u16[0], var_vec_s8[1]);
+ *var_vec_u16++ = vec_slo(var_vec_u16[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 0);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 1);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 2);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 3);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 4);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 5);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 6);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 7);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 8);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 9);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 10);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 11);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 12);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 13);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 14);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 15);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 16);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 17);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 18);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 19);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 20);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 21);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 22);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 23);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 24);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 25);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 26);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 27);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 28);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 29);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 30);
+ *var_vec_u16++ = vec_splat(var_vec_u16[0], 31);
+ *var_vec_u16++ = vec_splat_u16( 0);
+ *var_vec_u16++ = vec_splat_u16( 1);
+ *var_vec_u16++ = vec_splat_u16( 2);
+ *var_vec_u16++ = vec_splat_u16( 3);
+ *var_vec_u16++ = vec_splat_u16( 4);
+ *var_vec_u16++ = vec_splat_u16( 5);
+ *var_vec_u16++ = vec_splat_u16( 6);
+ *var_vec_u16++ = vec_splat_u16( 7);
+ *var_vec_u16++ = vec_splat_u16( 8);
+ *var_vec_u16++ = vec_splat_u16( 9);
+ *var_vec_u16++ = vec_splat_u16( -1);
+ *var_vec_u16++ = vec_splat_u16( -2);
+ *var_vec_u16++ = vec_splat_u16( -3);
+ *var_vec_u16++ = vec_splat_u16( -4);
+ *var_vec_u16++ = vec_splat_u16( -5);
+ *var_vec_u16++ = vec_splat_u16( -6);
+ *var_vec_u16++ = vec_splat_u16( -7);
+ *var_vec_u16++ = vec_splat_u16( -8);
+ *var_vec_u16++ = vec_splat_u16( -9);
+ *var_vec_u16++ = vec_splat_u16( 10);
+ *var_vec_u16++ = vec_splat_u16( 11);
+ *var_vec_u16++ = vec_splat_u16( 12);
+ *var_vec_u16++ = vec_splat_u16( 13);
+ *var_vec_u16++ = vec_splat_u16( 14);
+ *var_vec_u16++ = vec_splat_u16( 15);
+ *var_vec_u16++ = vec_splat_u16(-10);
+}
+void f21() {
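+ /* Remaining vec_splat_u16 literals, generic shifts and subtracts, and
+    the specific vec_v* builtins for vector unsigned short. */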
+ *var_vec_u16++ = vec_splat_u16(-11);
+ *var_vec_u16++ = vec_splat_u16(-12);
+ *var_vec_u16++ = vec_splat_u16(-13);
+ *var_vec_u16++ = vec_splat_u16(-14);
+ *var_vec_u16++ = vec_splat_u16(-15);
+ *var_vec_u16++ = vec_splat_u16(-16);
+ *var_vec_u16++ = vec_sr(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_sra(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_srl(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_srl(var_vec_u16[0], var_vec_u32[1]);
+ *var_vec_u16++ = vec_srl(var_vec_u16[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_sro(var_vec_u16[0], var_vec_s8[1]);
+ *var_vec_u16++ = vec_sro(var_vec_u16[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_sub(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_sub(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_sub(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_subs(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_subs(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_subs(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vadduhm(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vadduhm(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_vadduhm(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vadduhs(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vadduhs(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_vadduhs(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vand(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vand(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_vand(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vandc(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vandc(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_vandc(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vavguh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vmaxuh(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vmaxuh(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_vmaxuh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vminuh(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vminuh(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_vminuh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vmladduhm(var_vec_u16[0], var_vec_u16[1], var_vec_u16[2]);
+ *var_vec_u16++ = vec_vmrghh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vmrglh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vmuleub(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_vmuloub(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_vnor(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vor(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vor(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_vor(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vperm(var_vec_u16[0], var_vec_u16[1], var_vec_u8[2]);
+ *var_vec_u16++ = vec_vpkswus(var_vec_s32[0], var_vec_s32[1]);
+ *var_vec_u16++ = vec_vpkuwum(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u16++ = vec_vpkuwus(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u16++ = vec_vrlh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vsel(var_vec_u16[0], var_vec_u16[1], var_vec_b16[2]);
+ *var_vec_u16++ = vec_vsel(var_vec_u16[0], var_vec_u16[1], var_vec_u16[2]);
+ *var_vec_u16++ = vec_vsl(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vsl(var_vec_u16[0], var_vec_u32[1]);
+ *var_vec_u16++ = vec_vsl(var_vec_u16[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 0);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 1);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 2);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 3);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 4);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 5);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 6);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 7);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 8);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 9);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 10);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 11);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 12);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 13);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 14);
+ *var_vec_u16++ = vec_vsldoi(var_vec_u16[0], var_vec_u16[1], 15);
+ *var_vec_u16++ = vec_vslh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vslo(var_vec_u16[0], var_vec_s8[1]);
+ *var_vec_u16++ = vec_vslo(var_vec_u16[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 0);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 1);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 2);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 3);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 4);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 5);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 6);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 7);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 8);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 9);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 10);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 11);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 12);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 13);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 14);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 15);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 16);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 17);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 18);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 19);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 20);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 21);
+}
+void f22() {
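+ /* Remaining splat, shift, subtract, and xor forms for vector unsigned
+    short, then the generic builtins that yield vector unsigned int,
+    with vec_ctu at every 5-bit scale factor. */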
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 22);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 23);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 24);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 25);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 26);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 27);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 28);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 29);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 30);
+ *var_vec_u16++ = vec_vsplth(var_vec_u16[0], 31);
+ *var_vec_u16++ = vec_vsr(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vsr(var_vec_u16[0], var_vec_u32[1]);
+ *var_vec_u16++ = vec_vsr(var_vec_u16[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_vsrah(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vsrh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vsro(var_vec_u16[0], var_vec_s8[1]);
+ *var_vec_u16++ = vec_vsro(var_vec_u16[0], var_vec_u8[1]);
+ *var_vec_u16++ = vec_vsubuhm(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vsubuhm(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_vsubuhm(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vsubuhs(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vsubuhs(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_vsubuhs(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vxor(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_vxor(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_vxor(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_xor(var_vec_b16[0], var_vec_u16[1]);
+ *var_vec_u16++ = vec_xor(var_vec_u16[0], var_vec_b16[1]);
+ *var_vec_u16++ = vec_xor(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u32++ = vec_add(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_add(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_add(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_addc(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_adds(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_adds(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_adds(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_and(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_and(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_and(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_andc(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_andc(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_andc(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_avg(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 0);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 1);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 2);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 3);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 4);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 5);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 6);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 7);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 8);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 9);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 10);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 11);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 12);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 13);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 14);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 15);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 16);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 17);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 18);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 19);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 20);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 21);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 22);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 23);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 24);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 25);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 26);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 27);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 28);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 29);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 30);
+ *var_vec_u32++ = vec_ctu(var_vec_f32[0], 31);
+ *var_vec_u32++ = vec_ld(var_int[0], var_unsigned_int_ptr[1]);
+ *var_vec_u32++ = vec_ld(var_int[0], var_vec_u32_ptr[1]);
+ *var_vec_u32++ = vec_lde(var_int[0], var_unsigned_int_ptr[1]);
+ *var_vec_u32++ = vec_ldl(var_int[0], var_unsigned_int_ptr[1]);
+ *var_vec_u32++ = vec_ldl(var_int[0], var_vec_u32_ptr[1]);
+ *var_vec_u32++ = vec_lvewx(var_int[0], var_unsigned_int_ptr[1]);
+ *var_vec_u32++ = vec_lvx(var_int[0], var_unsigned_int_ptr[1]);
+ *var_vec_u32++ = vec_lvx(var_int[0], var_vec_u32_ptr[1]);
+ *var_vec_u32++ = vec_lvxl(var_int[0], var_unsigned_int_ptr[1]);
+ *var_vec_u32++ = vec_lvxl(var_int[0], var_vec_u32_ptr[1]);
+ *var_vec_u32++ = vec_max(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_max(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_max(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_mergeh(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_mergel(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_min(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_min(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_min(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_msum(var_vec_u16[0], var_vec_u16[1], var_vec_u32[2]);
+}
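+/* f23: vector unsigned int multiply-sum, multiply-even/odd, logical, permute, rotate and shift forms, with vec_sld, vec_splat and vec_splat_u32 over every accepted literal. */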
+void f23() {
+ *var_vec_u32++ = vec_msum(var_vec_u8[0], var_vec_u8[1], var_vec_u32[2]);
+ *var_vec_u32++ = vec_msums(var_vec_u16[0], var_vec_u16[1], var_vec_u32[2]);
+ *var_vec_u32++ = vec_mule(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u32++ = vec_mulo(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u32++ = vec_nor(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_or(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_or(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_or(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_perm(var_vec_u32[0], var_vec_u32[1], var_vec_u8[2]);
+ *var_vec_u32++ = vec_rl(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_sel(var_vec_u32[0], var_vec_u32[1], var_vec_b32[2]);
+ *var_vec_u32++ = vec_sel(var_vec_u32[0], var_vec_u32[1], var_vec_u32[2]);
+ *var_vec_u32++ = vec_sl(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 0);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 1);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 2);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 3);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 4);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 5);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 6);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 7);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 8);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 9);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 10);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 11);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 12);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 13);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 14);
+ *var_vec_u32++ = vec_sld(var_vec_u32[0], var_vec_u32[1], 15);
+ *var_vec_u32++ = vec_sll(var_vec_u32[0], var_vec_u16[1]);
+ *var_vec_u32++ = vec_sll(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_sll(var_vec_u32[0], var_vec_u8[1]);
+ *var_vec_u32++ = vec_slo(var_vec_u32[0], var_vec_s8[1]);
+ *var_vec_u32++ = vec_slo(var_vec_u32[0], var_vec_u8[1]);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 0);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 1);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 2);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 3);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 4);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 5);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 6);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 7);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 8);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 9);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 10);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 11);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 12);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 13);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 14);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 15);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 16);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 17);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 18);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 19);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 20);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 21);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 22);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 23);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 24);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 25);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 26);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 27);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 28);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 29);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 30);
+ *var_vec_u32++ = vec_splat(var_vec_u32[0], 31);
+ *var_vec_u32++ = vec_splat_u32( 0);
+ *var_vec_u32++ = vec_splat_u32( 1);
+ *var_vec_u32++ = vec_splat_u32( 2);
+ *var_vec_u32++ = vec_splat_u32( 3);
+ *var_vec_u32++ = vec_splat_u32( 4);
+ *var_vec_u32++ = vec_splat_u32( 5);
+ *var_vec_u32++ = vec_splat_u32( 6);
+ *var_vec_u32++ = vec_splat_u32( 7);
+ *var_vec_u32++ = vec_splat_u32( 8);
+ *var_vec_u32++ = vec_splat_u32( 9);
+ *var_vec_u32++ = vec_splat_u32( -1);
+ *var_vec_u32++ = vec_splat_u32( -2);
+ *var_vec_u32++ = vec_splat_u32( -3);
+ *var_vec_u32++ = vec_splat_u32( -4);
+ *var_vec_u32++ = vec_splat_u32( -5);
+ *var_vec_u32++ = vec_splat_u32( -6);
+ *var_vec_u32++ = vec_splat_u32( -7);
+ *var_vec_u32++ = vec_splat_u32( -8);
+ *var_vec_u32++ = vec_splat_u32( -9);
+ *var_vec_u32++ = vec_splat_u32( 10);
+ *var_vec_u32++ = vec_splat_u32( 11);
+ *var_vec_u32++ = vec_splat_u32( 12);
+ *var_vec_u32++ = vec_splat_u32( 13);
+ *var_vec_u32++ = vec_splat_u32( 14);
+ *var_vec_u32++ = vec_splat_u32( 15);
+ *var_vec_u32++ = vec_splat_u32(-10);
+ *var_vec_u32++ = vec_splat_u32(-11);
+ *var_vec_u32++ = vec_splat_u32(-12);
+ *var_vec_u32++ = vec_splat_u32(-13);
+ *var_vec_u32++ = vec_splat_u32(-14);
+ *var_vec_u32++ = vec_splat_u32(-15);
+ *var_vec_u32++ = vec_splat_u32(-16);
+ *var_vec_u32++ = vec_sr(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_sra(var_vec_u32[0], var_vec_u32[1]);
+}
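+/* f24: vector unsigned int shift-right, subtract and sum-across forms, pixel unpacks, and the v-prefixed builtins, including vec_vctuxs over all 32 literals and vec_vsldoi literals 0-12. */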
+void f24() {
+ *var_vec_u32++ = vec_srl(var_vec_u32[0], var_vec_u16[1]);
+ *var_vec_u32++ = vec_srl(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_srl(var_vec_u32[0], var_vec_u8[1]);
+ *var_vec_u32++ = vec_sro(var_vec_u32[0], var_vec_s8[1]);
+ *var_vec_u32++ = vec_sro(var_vec_u32[0], var_vec_u8[1]);
+ *var_vec_u32++ = vec_sub(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_sub(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_sub(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_subc(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_subs(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_subs(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_subs(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_sum4s(var_vec_u8[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_unpackh(var_vec_p16[0]);
+ *var_vec_u32++ = vec_unpackl(var_vec_p16[0]);
+ *var_vec_u32++ = vec_vaddcuw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vadduwm(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vadduwm(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_vadduwm(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vadduws(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vadduws(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_vadduws(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vand(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vand(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_vand(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vandc(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vandc(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_vandc(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vavguw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 0);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 1);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 2);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 3);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 4);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 5);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 6);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 7);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 8);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 9);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 10);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 11);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 12);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 13);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 14);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 15);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 16);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 17);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 18);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 19);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 20);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 21);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 22);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 23);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 24);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 25);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 26);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 27);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 28);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 29);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 30);
+ *var_vec_u32++ = vec_vctuxs(var_vec_f32[0], 31);
+ *var_vec_u32++ = vec_vmaxuw(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vmaxuw(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_vmaxuw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vminuw(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vminuw(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_vminuw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vmrghw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vmrglw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vmsumubm(var_vec_u8[0], var_vec_u8[1], var_vec_u32[2]);
+ *var_vec_u32++ = vec_vmsumuhm(var_vec_u16[0], var_vec_u16[1], var_vec_u32[2]);
+ *var_vec_u32++ = vec_vmsumuhs(var_vec_u16[0], var_vec_u16[1], var_vec_u32[2]);
+ *var_vec_u32++ = vec_vmuleuh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u32++ = vec_vmulouh(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u32++ = vec_vnor(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vor(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vor(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_vor(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vperm(var_vec_u32[0], var_vec_u32[1], var_vec_u8[2]);
+ *var_vec_u32++ = vec_vrlw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vsel(var_vec_u32[0], var_vec_u32[1], var_vec_b32[2]);
+ *var_vec_u32++ = vec_vsel(var_vec_u32[0], var_vec_u32[1], var_vec_u32[2]);
+ *var_vec_u32++ = vec_vsl(var_vec_u32[0], var_vec_u16[1]);
+ *var_vec_u32++ = vec_vsl(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vsl(var_vec_u32[0], var_vec_u8[1]);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 0);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 1);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 2);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 3);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 4);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 5);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 6);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 7);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 8);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 9);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 10);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 11);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 12);
+}
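+/* f25: vec_vsldoi literals 13-15 and vec_vspltw over all 32 literals, the remaining vector unsigned int shift, subtract and xor forms, then the first vector unsigned char overloads: arithmetic, logical, loads, and vec_lvsl/vec_lvsr for every supported pointer type. */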
+void f25() {
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 13);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 14);
+ *var_vec_u32++ = vec_vsldoi(var_vec_u32[0], var_vec_u32[1], 15);
+ *var_vec_u32++ = vec_vslo(var_vec_u32[0], var_vec_s8[1]);
+ *var_vec_u32++ = vec_vslo(var_vec_u32[0], var_vec_u8[1]);
+ *var_vec_u32++ = vec_vslw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 0);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 1);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 2);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 3);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 4);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 5);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 6);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 7);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 8);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 9);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 10);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 11);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 12);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 13);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 14);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 15);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 16);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 17);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 18);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 19);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 20);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 21);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 22);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 23);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 24);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 25);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 26);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 27);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 28);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 29);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 30);
+ *var_vec_u32++ = vec_vspltw(var_vec_u32[0], 31);
+ *var_vec_u32++ = vec_vsr(var_vec_u32[0], var_vec_u16[1]);
+ *var_vec_u32++ = vec_vsr(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vsr(var_vec_u32[0], var_vec_u8[1]);
+ *var_vec_u32++ = vec_vsraw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vsro(var_vec_u32[0], var_vec_s8[1]);
+ *var_vec_u32++ = vec_vsro(var_vec_u32[0], var_vec_u8[1]);
+ *var_vec_u32++ = vec_vsrw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vsubcuw(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vsubuwm(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vsubuwm(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_vsubuwm(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vsubuws(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vsubuws(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_vsubuws(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vsum4ubs(var_vec_u8[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vupkhpx(var_vec_p16[0]);
+ *var_vec_u32++ = vec_vupklpx(var_vec_p16[0]);
+ *var_vec_u32++ = vec_vxor(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_vxor(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_vxor(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_xor(var_vec_b32[0], var_vec_u32[1]);
+ *var_vec_u32++ = vec_xor(var_vec_u32[0], var_vec_b32[1]);
+ *var_vec_u32++ = vec_xor(var_vec_u32[0], var_vec_u32[1]);
+ *var_vec_u8++ = vec_add(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_add(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_add(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_adds(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_adds(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_adds(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_and(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_and(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_and(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_andc(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_andc(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_andc(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_avg(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_ld(var_int[0], var_unsigned_char_ptr[1]);
+ *var_vec_u8++ = vec_ld(var_int[0], var_vec_u8_ptr[1]);
+ *var_vec_u8++ = vec_lde(var_int[0], var_unsigned_char_ptr[1]);
+ *var_vec_u8++ = vec_ldl(var_int[0], var_unsigned_char_ptr[1]);
+ *var_vec_u8++ = vec_ldl(var_int[0], var_vec_u8_ptr[1]);
+ *var_vec_u8++ = vec_lvebx(var_int[0], var_unsigned_char_ptr[1]);
+ *var_vec_u8++ = vec_lvsl(var_int[0], var_float_ptr[1]);
+ *var_vec_u8++ = vec_lvsl(var_int[0], var_int_ptr[1]);
+ *var_vec_u8++ = vec_lvsl(var_int[0], var_short_ptr[1]);
+ *var_vec_u8++ = vec_lvsl(var_int[0], var_signed_char_ptr[1]);
+ *var_vec_u8++ = vec_lvsl(var_int[0], var_unsigned_char_ptr[1]);
+ *var_vec_u8++ = vec_lvsl(var_int[0], var_unsigned_int_ptr[1]);
+ *var_vec_u8++ = vec_lvsl(var_int[0], var_unsigned_short_ptr[1]);
+ *var_vec_u8++ = vec_lvsr(var_int[0], var_float_ptr[1]);
+ *var_vec_u8++ = vec_lvsr(var_int[0], var_int_ptr[1]);
+ *var_vec_u8++ = vec_lvsr(var_int[0], var_short_ptr[1]);
+ *var_vec_u8++ = vec_lvsr(var_int[0], var_signed_char_ptr[1]);
+ *var_vec_u8++ = vec_lvsr(var_int[0], var_unsigned_char_ptr[1]);
+ *var_vec_u8++ = vec_lvsr(var_int[0], var_unsigned_int_ptr[1]);
+ *var_vec_u8++ = vec_lvsr(var_int[0], var_unsigned_short_ptr[1]);
+ *var_vec_u8++ = vec_lvx(var_int[0], var_unsigned_char_ptr[1]);
+ *var_vec_u8++ = vec_lvx(var_int[0], var_vec_u8_ptr[1]);
+}
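+/* f26: vector unsigned char min/max, merge, pack, permute, rotate, select and shift forms, vec_sld over literals 0-15, and vec_splat/vec_splat_u8 literal coverage. */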
+void f26() {
+ *var_vec_u8++ = vec_lvxl(var_int[0], var_unsigned_char_ptr[1]);
+ *var_vec_u8++ = vec_lvxl(var_int[0], var_vec_u8_ptr[1]);
+ *var_vec_u8++ = vec_max(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_max(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_max(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_mergeh(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_mergel(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_min(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_min(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_min(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_nor(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_or(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_or(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_or(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_pack(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u8++ = vec_packs(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u8++ = vec_packsu(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_u8++ = vec_packsu(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u8++ = vec_perm(var_vec_u8[0], var_vec_u8[1], var_vec_u8[2]);
+ *var_vec_u8++ = vec_rl(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_sel(var_vec_u8[0], var_vec_u8[1], var_vec_b8[2]);
+ *var_vec_u8++ = vec_sel(var_vec_u8[0], var_vec_u8[1], var_vec_u8[2]);
+ *var_vec_u8++ = vec_sl(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 0);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 1);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 2);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 3);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 4);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 5);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 6);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 7);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 8);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 9);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 10);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 11);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 12);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 13);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 14);
+ *var_vec_u8++ = vec_sld(var_vec_u8[0], var_vec_u8[1], 15);
+ *var_vec_u8++ = vec_sll(var_vec_u8[0], var_vec_u16[1]);
+ *var_vec_u8++ = vec_sll(var_vec_u8[0], var_vec_u32[1]);
+ *var_vec_u8++ = vec_sll(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_slo(var_vec_u8[0], var_vec_s8[1]);
+ *var_vec_u8++ = vec_slo(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 0);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 1);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 2);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 3);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 4);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 5);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 6);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 7);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 8);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 9);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 10);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 11);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 12);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 13);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 14);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 15);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 16);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 17);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 18);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 19);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 20);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 21);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 22);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 23);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 24);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 25);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 26);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 27);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 28);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 29);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 30);
+ *var_vec_u8++ = vec_splat(var_vec_u8[0], 31);
+ *var_vec_u8++ = vec_splat_u8( 0);
+ *var_vec_u8++ = vec_splat_u8( 1);
+ *var_vec_u8++ = vec_splat_u8( 2);
+ *var_vec_u8++ = vec_splat_u8( 3);
+ *var_vec_u8++ = vec_splat_u8( 4);
+ *var_vec_u8++ = vec_splat_u8( 5);
+ *var_vec_u8++ = vec_splat_u8( 6);
+ *var_vec_u8++ = vec_splat_u8( 7);
+ *var_vec_u8++ = vec_splat_u8( 8);
+ *var_vec_u8++ = vec_splat_u8( 9);
+ *var_vec_u8++ = vec_splat_u8( -1);
+ *var_vec_u8++ = vec_splat_u8( -2);
+ *var_vec_u8++ = vec_splat_u8( -3);
+ *var_vec_u8++ = vec_splat_u8( -4);
+ *var_vec_u8++ = vec_splat_u8( -5);
+ *var_vec_u8++ = vec_splat_u8( -6);
+ *var_vec_u8++ = vec_splat_u8( -7);
+ *var_vec_u8++ = vec_splat_u8( -8);
+ *var_vec_u8++ = vec_splat_u8( -9);
+ *var_vec_u8++ = vec_splat_u8( 10);
+ *var_vec_u8++ = vec_splat_u8( 11);
+ *var_vec_u8++ = vec_splat_u8( 12);
+ *var_vec_u8++ = vec_splat_u8( 13);
+ *var_vec_u8++ = vec_splat_u8( 14);
+}
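+/* f27: the remaining vec_splat_u8 literals, vector unsigned char shift and subtract forms, and their v-prefixed equivalents through vec_vspltb literal 24. */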
+void f27() {
+ *var_vec_u8++ = vec_splat_u8( 15);
+ *var_vec_u8++ = vec_splat_u8(-10);
+ *var_vec_u8++ = vec_splat_u8(-11);
+ *var_vec_u8++ = vec_splat_u8(-12);
+ *var_vec_u8++ = vec_splat_u8(-13);
+ *var_vec_u8++ = vec_splat_u8(-14);
+ *var_vec_u8++ = vec_splat_u8(-15);
+ *var_vec_u8++ = vec_splat_u8(-16);
+ *var_vec_u8++ = vec_sr(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_sra(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_srl(var_vec_u8[0], var_vec_u16[1]);
+ *var_vec_u8++ = vec_srl(var_vec_u8[0], var_vec_u32[1]);
+ *var_vec_u8++ = vec_srl(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_sro(var_vec_u8[0], var_vec_s8[1]);
+ *var_vec_u8++ = vec_sro(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_sub(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_sub(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_sub(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_subs(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_subs(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_subs(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vaddubm(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vaddubm(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_vaddubm(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vaddubs(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vaddubs(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_vaddubs(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vand(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vand(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_vand(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vandc(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vandc(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_vandc(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vavgub(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vmaxub(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vmaxub(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_vmaxub(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vminub(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vminub(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_vminub(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vmrghb(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vmrglb(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vnor(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vor(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vor(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_vor(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vperm(var_vec_u8[0], var_vec_u8[1], var_vec_u8[2]);
+ *var_vec_u8++ = vec_vpkshus(var_vec_s16[0], var_vec_s16[1]);
+ *var_vec_u8++ = vec_vpkuhum(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u8++ = vec_vpkuhus(var_vec_u16[0], var_vec_u16[1]);
+ *var_vec_u8++ = vec_vrlb(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vsel(var_vec_u8[0], var_vec_u8[1], var_vec_b8[2]);
+ *var_vec_u8++ = vec_vsel(var_vec_u8[0], var_vec_u8[1], var_vec_u8[2]);
+ *var_vec_u8++ = vec_vsl(var_vec_u8[0], var_vec_u16[1]);
+ *var_vec_u8++ = vec_vsl(var_vec_u8[0], var_vec_u32[1]);
+ *var_vec_u8++ = vec_vsl(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vslb(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 0);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 1);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 2);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 3);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 4);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 5);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 6);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 7);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 8);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 9);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 10);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 11);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 12);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 13);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 14);
+ *var_vec_u8++ = vec_vsldoi(var_vec_u8[0], var_vec_u8[1], 15);
+ *var_vec_u8++ = vec_vslo(var_vec_u8[0], var_vec_s8[1]);
+ *var_vec_u8++ = vec_vslo(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 0);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 1);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 2);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 3);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 4);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 5);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 6);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 7);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 8);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 9);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 10);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 11);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 12);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 13);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 14);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 15);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 16);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 17);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 18);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 19);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 20);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 21);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 22);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 23);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 24);
+}
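+/* f28: vec_vspltb literals 25-31, the last vector unsigned char forms and vec_mfvscr, then the negated vec_all_* predicates; each taken branch advances one of the condition-code result pointers (var_cc24t, var_cc26tr and friends). */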
+void f28() {
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 25);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 26);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 27);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 28);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 29);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 30);
+ *var_vec_u8++ = vec_vspltb(var_vec_u8[0], 31);
+ *var_vec_u8++ = vec_vsr(var_vec_u8[0], var_vec_u16[1]);
+ *var_vec_u8++ = vec_vsr(var_vec_u8[0], var_vec_u32[1]);
+ *var_vec_u8++ = vec_vsr(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vsrab(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vsrb(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vsro(var_vec_u8[0], var_vec_s8[1]);
+ *var_vec_u8++ = vec_vsro(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vsububm(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vsububm(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_vsububm(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vsububs(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vsububs(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_vsububs(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vxor(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_vxor(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_vxor(var_vec_u8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_xor(var_vec_b8[0], var_vec_u8[1]);
+ *var_vec_u8++ = vec_xor(var_vec_u8[0], var_vec_b8[1]);
+ *var_vec_u8++ = vec_xor(var_vec_u8[0], var_vec_u8[1]);
+ *var_volatile_vec_u16++ = vec_mfvscr();
+ if(!vec_all_eq(var_vec_b16[0], var_vec_b16[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_b16[0], var_vec_s16[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_b16[0], var_vec_u16[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_b32[0], var_vec_b32[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_b32[0], var_vec_s32[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_b32[0], var_vec_u32[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_b8[0], var_vec_b8[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_b8[0], var_vec_s8[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_b8[0], var_vec_u8[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_f32[0], var_vec_f32[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_p16[0], var_vec_p16[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_s16[0], var_vec_b16[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_s16[0], var_vec_s16[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_s32[0], var_vec_b32[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_s32[0], var_vec_s32[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_s8[0], var_vec_b8[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_s8[0], var_vec_s8[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_u16[0], var_vec_b16[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_u16[0], var_vec_u16[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_u32[0], var_vec_b32[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_u32[0], var_vec_u32[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_u8[0], var_vec_b8[1])) *var_cc24t++;
+ if(!vec_all_eq(var_vec_u8[0], var_vec_u8[1])) *var_cc24t++;
+ if(!vec_all_ge(var_vec_b16[0], var_vec_s16[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_b16[0], var_vec_u16[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_b32[0], var_vec_s32[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_b32[0], var_vec_u32[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_b8[0], var_vec_s8[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_b8[0], var_vec_u8[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_f32[0], var_vec_f32[1])) *var_cc24t++;
+ if(!vec_all_ge(var_vec_s16[0], var_vec_b16[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_s16[0], var_vec_s16[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_s32[0], var_vec_b32[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_s32[0], var_vec_s32[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_s8[0], var_vec_b8[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_s8[0], var_vec_s8[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_u16[0], var_vec_b16[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_u16[0], var_vec_u16[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_u32[0], var_vec_b32[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_u32[0], var_vec_u32[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_u8[0], var_vec_b8[1])) *var_cc26tr++;
+ if(!vec_all_ge(var_vec_u8[0], var_vec_u8[1])) *var_cc26tr++;
+ if(!vec_all_gt(var_vec_b16[0], var_vec_s16[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_b16[0], var_vec_u16[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_b32[0], var_vec_s32[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_b32[0], var_vec_u32[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_b8[0], var_vec_s8[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_b8[0], var_vec_u8[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_f32[0], var_vec_f32[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_s16[0], var_vec_b16[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_s16[0], var_vec_s16[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_s32[0], var_vec_b32[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_s32[0], var_vec_s32[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_s8[0], var_vec_b8[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_s8[0], var_vec_s8[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_u16[0], var_vec_b16[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_u16[0], var_vec_u16[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_u32[0], var_vec_b32[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_u32[0], var_vec_u32[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_u8[0], var_vec_b8[1])) *var_cc24t++;
+ if(!vec_all_gt(var_vec_u8[0], var_vec_u8[1])) *var_cc24t++;
+ if(!vec_all_in(var_vec_f32[0], var_vec_f32[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_b16[0], var_vec_s16[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_b16[0], var_vec_u16[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_b32[0], var_vec_s32[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_b32[0], var_vec_u32[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_b8[0], var_vec_s8[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_b8[0], var_vec_u8[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_f32[0], var_vec_f32[1])) *var_cc24tr++;
+ if(!vec_all_le(var_vec_s16[0], var_vec_b16[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_s16[0], var_vec_s16[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_s32[0], var_vec_b32[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_s32[0], var_vec_s32[1])) *var_cc26t++;
+}
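+/* f29: negated vec_all_le/lt/nan/ne/nge/ngt/nle/nlt/numeric predicates, then the start of the negated vec_any_* forms. */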
+void f29() {
+ if(!vec_all_le(var_vec_s8[0], var_vec_b8[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_s8[0], var_vec_s8[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_u16[0], var_vec_b16[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_u16[0], var_vec_u16[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_u32[0], var_vec_b32[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_u32[0], var_vec_u32[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_u8[0], var_vec_b8[1])) *var_cc26t++;
+ if(!vec_all_le(var_vec_u8[0], var_vec_u8[1])) *var_cc26t++;
+ if(!vec_all_lt(var_vec_b16[0], var_vec_s16[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_b16[0], var_vec_u16[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_b32[0], var_vec_s32[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_b32[0], var_vec_u32[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_b8[0], var_vec_s8[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_b8[0], var_vec_u8[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_f32[0], var_vec_f32[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_s16[0], var_vec_b16[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_s16[0], var_vec_s16[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_s32[0], var_vec_b32[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_s32[0], var_vec_s32[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_s8[0], var_vec_b8[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_s8[0], var_vec_s8[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_u16[0], var_vec_b16[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_u16[0], var_vec_u16[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_u32[0], var_vec_b32[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_u32[0], var_vec_u32[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_u8[0], var_vec_b8[1])) *var_cc24tr++;
+ if(!vec_all_lt(var_vec_u8[0], var_vec_u8[1])) *var_cc24tr++;
+ if(!vec_all_nan(var_vec_f32[0])) *var_cc26td++;
+ if(!vec_all_ne(var_vec_b16[0], var_vec_b16[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_b16[0], var_vec_s16[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_b16[0], var_vec_u16[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_b32[0], var_vec_b32[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_b32[0], var_vec_s32[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_b32[0], var_vec_u32[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_b8[0], var_vec_b8[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_b8[0], var_vec_s8[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_b8[0], var_vec_u8[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_f32[0], var_vec_f32[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_p16[0], var_vec_p16[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_s16[0], var_vec_b16[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_s16[0], var_vec_s16[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_s32[0], var_vec_b32[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_s32[0], var_vec_s32[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_s8[0], var_vec_b8[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_s8[0], var_vec_s8[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_u16[0], var_vec_b16[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_u16[0], var_vec_u16[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_u32[0], var_vec_b32[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_u32[0], var_vec_u32[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_u8[0], var_vec_b8[1])) *var_cc26t++;
+ if(!vec_all_ne(var_vec_u8[0], var_vec_u8[1])) *var_cc26t++;
+ if(!vec_all_nge(var_vec_f32[0], var_vec_f32[1])) *var_cc26t++;
+ if(!vec_all_ngt(var_vec_f32[0], var_vec_f32[1])) *var_cc26t++;
+ if(!vec_all_nle(var_vec_f32[0], var_vec_f32[1])) *var_cc26tr++;
+ if(!vec_all_nlt(var_vec_f32[0], var_vec_f32[1])) *var_cc26tr++;
+ if(!vec_all_numeric(var_vec_f32[0])) *var_cc24td++;
+ if(!vec_any_eq(var_vec_b16[0], var_vec_b16[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_b16[0], var_vec_s16[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_b16[0], var_vec_u16[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_b32[0], var_vec_b32[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_b32[0], var_vec_s32[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_b32[0], var_vec_u32[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_b8[0], var_vec_b8[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_b8[0], var_vec_s8[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_b8[0], var_vec_u8[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_f32[0], var_vec_f32[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_p16[0], var_vec_p16[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_s16[0], var_vec_b16[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_s16[0], var_vec_s16[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_s32[0], var_vec_b32[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_s32[0], var_vec_s32[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_s8[0], var_vec_b8[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_s8[0], var_vec_s8[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_u16[0], var_vec_b16[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_u16[0], var_vec_u16[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_u32[0], var_vec_b32[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_u32[0], var_vec_u32[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_u8[0], var_vec_b8[1])) *var_cc26f++;
+ if(!vec_any_eq(var_vec_u8[0], var_vec_u8[1])) *var_cc26f++;
+ if(!vec_any_ge(var_vec_b16[0], var_vec_s16[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_b16[0], var_vec_u16[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_b32[0], var_vec_s32[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_b32[0], var_vec_u32[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_b8[0], var_vec_s8[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_b8[0], var_vec_u8[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_f32[0], var_vec_f32[1])) *var_cc26f++;
+ if(!vec_any_ge(var_vec_s16[0], var_vec_b16[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_s16[0], var_vec_s16[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_s32[0], var_vec_b32[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_s32[0], var_vec_s32[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_s8[0], var_vec_b8[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_s8[0], var_vec_s8[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_u16[0], var_vec_b16[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_u16[0], var_vec_u16[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_u32[0], var_vec_b32[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_u32[0], var_vec_u32[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_u8[0], var_vec_b8[1])) *var_cc24fr++;
+ if(!vec_any_ge(var_vec_u8[0], var_vec_u8[1])) *var_cc24fr++;
+ if(!vec_any_gt(var_vec_b16[0], var_vec_s16[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_b16[0], var_vec_u16[1])) *var_cc26f++;
+}
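+/* f30: the remaining negated vec_any_* predicates through vec_any_out, then the non-negated vec_all_eq forms. */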
+void f30() {
+ if(!vec_any_gt(var_vec_b32[0], var_vec_s32[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_b32[0], var_vec_u32[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_b8[0], var_vec_s8[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_b8[0], var_vec_u8[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_f32[0], var_vec_f32[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_s16[0], var_vec_b16[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_s16[0], var_vec_s16[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_s32[0], var_vec_b32[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_s32[0], var_vec_s32[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_s8[0], var_vec_b8[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_s8[0], var_vec_s8[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_u16[0], var_vec_b16[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_u16[0], var_vec_u16[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_u32[0], var_vec_b32[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_u32[0], var_vec_u32[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_u8[0], var_vec_b8[1])) *var_cc26f++;
+ if(!vec_any_gt(var_vec_u8[0], var_vec_u8[1])) *var_cc26f++;
+ if(!vec_any_le(var_vec_b16[0], var_vec_s16[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_b16[0], var_vec_u16[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_b32[0], var_vec_s32[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_b32[0], var_vec_u32[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_b8[0], var_vec_s8[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_b8[0], var_vec_u8[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_f32[0], var_vec_f32[1])) *var_cc26fr++;
+ if(!vec_any_le(var_vec_s16[0], var_vec_b16[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_s16[0], var_vec_s16[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_s32[0], var_vec_b32[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_s32[0], var_vec_s32[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_s8[0], var_vec_b8[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_s8[0], var_vec_s8[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_u16[0], var_vec_b16[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_u16[0], var_vec_u16[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_u32[0], var_vec_b32[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_u32[0], var_vec_u32[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_u8[0], var_vec_b8[1])) *var_cc24f++;
+ if(!vec_any_le(var_vec_u8[0], var_vec_u8[1])) *var_cc24f++;
+ if(!vec_any_lt(var_vec_b16[0], var_vec_s16[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_b16[0], var_vec_u16[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_b32[0], var_vec_s32[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_b32[0], var_vec_u32[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_b8[0], var_vec_s8[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_b8[0], var_vec_u8[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_f32[0], var_vec_f32[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_s16[0], var_vec_b16[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_s16[0], var_vec_s16[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_s32[0], var_vec_b32[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_s32[0], var_vec_s32[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_s8[0], var_vec_b8[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_s8[0], var_vec_s8[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_u16[0], var_vec_b16[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_u16[0], var_vec_u16[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_u32[0], var_vec_b32[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_u32[0], var_vec_u32[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_u8[0], var_vec_b8[1])) *var_cc26fr++;
+ if(!vec_any_lt(var_vec_u8[0], var_vec_u8[1])) *var_cc26fr++;
+ if(!vec_any_nan(var_vec_f32[0])) *var_cc24fd++;
+ if(!vec_any_ne(var_vec_b16[0], var_vec_b16[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_b16[0], var_vec_s16[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_b16[0], var_vec_u16[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_b32[0], var_vec_b32[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_b32[0], var_vec_s32[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_b32[0], var_vec_u32[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_b8[0], var_vec_b8[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_b8[0], var_vec_s8[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_b8[0], var_vec_u8[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_f32[0], var_vec_f32[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_p16[0], var_vec_p16[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_s16[0], var_vec_b16[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_s16[0], var_vec_s16[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_s32[0], var_vec_b32[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_s32[0], var_vec_s32[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_s8[0], var_vec_b8[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_s8[0], var_vec_s8[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_u16[0], var_vec_b16[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_u16[0], var_vec_u16[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_u32[0], var_vec_b32[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_u32[0], var_vec_u32[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_u8[0], var_vec_b8[1])) *var_cc24f++;
+ if(!vec_any_ne(var_vec_u8[0], var_vec_u8[1])) *var_cc24f++;
+ if(!vec_any_nge(var_vec_f32[0], var_vec_f32[1])) *var_cc24f++;
+ if(!vec_any_ngt(var_vec_f32[0], var_vec_f32[1])) *var_cc24f++;
+ if(!vec_any_nle(var_vec_f32[0], var_vec_f32[1])) *var_cc24fr++;
+ if(!vec_any_nlt(var_vec_f32[0], var_vec_f32[1])) *var_cc24fr++;
+ if(!vec_any_numeric(var_vec_f32[0])) *var_cc26fd++;
+ if(!vec_any_out(var_vec_f32[0], var_vec_f32[1])) *var_cc26f++;
+ if(vec_all_eq(var_vec_b16[0], var_vec_b16[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_b16[0], var_vec_s16[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_b16[0], var_vec_u16[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_b32[0], var_vec_b32[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_b32[0], var_vec_s32[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_b32[0], var_vec_u32[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_b8[0], var_vec_b8[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_b8[0], var_vec_s8[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_b8[0], var_vec_u8[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_f32[0], var_vec_f32[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_p16[0], var_vec_p16[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_s16[0], var_vec_b16[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_s16[0], var_vec_s16[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_s32[0], var_vec_b32[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_s32[0], var_vec_s32[1])) *var_cc24t++;
+}
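+/* f31: non-negated vec_all_eq/ge/gt/in/le/lt/nan predicates and the start of vec_all_ne, across every boolean/signed/unsigned/float operand pairing. */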
+void f31() {
+ if(vec_all_eq(var_vec_s8[0], var_vec_b8[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_s8[0], var_vec_s8[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_u16[0], var_vec_b16[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_u16[0], var_vec_u16[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_u32[0], var_vec_b32[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_u32[0], var_vec_u32[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_u8[0], var_vec_b8[1])) *var_cc24t++;
+ if(vec_all_eq(var_vec_u8[0], var_vec_u8[1])) *var_cc24t++;
+ if(vec_all_ge(var_vec_b16[0], var_vec_s16[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_b16[0], var_vec_u16[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_b32[0], var_vec_s32[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_b32[0], var_vec_u32[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_b8[0], var_vec_s8[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_b8[0], var_vec_u8[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_f32[0], var_vec_f32[1])) *var_cc24t++;
+ if(vec_all_ge(var_vec_s16[0], var_vec_b16[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_s16[0], var_vec_s16[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_s32[0], var_vec_b32[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_s32[0], var_vec_s32[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_s8[0], var_vec_b8[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_s8[0], var_vec_s8[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_u16[0], var_vec_b16[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_u16[0], var_vec_u16[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_u32[0], var_vec_b32[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_u32[0], var_vec_u32[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_u8[0], var_vec_b8[1])) *var_cc26tr++;
+ if(vec_all_ge(var_vec_u8[0], var_vec_u8[1])) *var_cc26tr++;
+ if(vec_all_gt(var_vec_b16[0], var_vec_s16[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_b16[0], var_vec_u16[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_b32[0], var_vec_s32[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_b32[0], var_vec_u32[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_b8[0], var_vec_s8[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_b8[0], var_vec_u8[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_f32[0], var_vec_f32[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_s16[0], var_vec_b16[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_s16[0], var_vec_s16[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_s32[0], var_vec_b32[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_s32[0], var_vec_s32[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_s8[0], var_vec_b8[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_s8[0], var_vec_s8[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_u16[0], var_vec_b16[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_u16[0], var_vec_u16[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_u32[0], var_vec_b32[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_u32[0], var_vec_u32[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_u8[0], var_vec_b8[1])) *var_cc24t++;
+ if(vec_all_gt(var_vec_u8[0], var_vec_u8[1])) *var_cc24t++;
+ if(vec_all_in(var_vec_f32[0], var_vec_f32[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_b16[0], var_vec_s16[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_b16[0], var_vec_u16[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_b32[0], var_vec_s32[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_b32[0], var_vec_u32[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_b8[0], var_vec_s8[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_b8[0], var_vec_u8[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_f32[0], var_vec_f32[1])) *var_cc24tr++;
+ if(vec_all_le(var_vec_s16[0], var_vec_b16[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_s16[0], var_vec_s16[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_s32[0], var_vec_b32[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_s32[0], var_vec_s32[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_s8[0], var_vec_b8[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_s8[0], var_vec_s8[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_u16[0], var_vec_b16[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_u16[0], var_vec_u16[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_u32[0], var_vec_b32[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_u32[0], var_vec_u32[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_u8[0], var_vec_b8[1])) *var_cc26t++;
+ if(vec_all_le(var_vec_u8[0], var_vec_u8[1])) *var_cc26t++;
+ if(vec_all_lt(var_vec_b16[0], var_vec_s16[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_b16[0], var_vec_u16[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_b32[0], var_vec_s32[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_b32[0], var_vec_u32[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_b8[0], var_vec_s8[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_b8[0], var_vec_u8[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_f32[0], var_vec_f32[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_s16[0], var_vec_b16[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_s16[0], var_vec_s16[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_s32[0], var_vec_b32[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_s32[0], var_vec_s32[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_s8[0], var_vec_b8[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_s8[0], var_vec_s8[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_u16[0], var_vec_b16[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_u16[0], var_vec_u16[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_u32[0], var_vec_b32[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_u32[0], var_vec_u32[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_u8[0], var_vec_b8[1])) *var_cc24tr++;
+ if(vec_all_lt(var_vec_u8[0], var_vec_u8[1])) *var_cc24tr++;
+ if(vec_all_nan(var_vec_f32[0])) *var_cc26td++;
+ if(vec_all_ne(var_vec_b16[0], var_vec_b16[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_b16[0], var_vec_s16[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_b16[0], var_vec_u16[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_b32[0], var_vec_b32[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_b32[0], var_vec_s32[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_b32[0], var_vec_u32[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_b8[0], var_vec_b8[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_b8[0], var_vec_s8[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_b8[0], var_vec_u8[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_f32[0], var_vec_f32[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_p16[0], var_vec_p16[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_s16[0], var_vec_b16[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_s16[0], var_vec_s16[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_s32[0], var_vec_b32[1])) *var_cc26t++;
+}
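+/* f32: the remaining vec_all_* predicates (ne, nge, ngt, nle, nlt, numeric) and the non-negated vec_any_eq/ge/gt/le forms. */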
+void f32() {
+ if(vec_all_ne(var_vec_s32[0], var_vec_s32[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_s8[0], var_vec_b8[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_s8[0], var_vec_s8[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_u16[0], var_vec_b16[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_u16[0], var_vec_u16[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_u32[0], var_vec_b32[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_u32[0], var_vec_u32[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_u8[0], var_vec_b8[1])) *var_cc26t++;
+ if(vec_all_ne(var_vec_u8[0], var_vec_u8[1])) *var_cc26t++;
+ if(vec_all_nge(var_vec_f32[0], var_vec_f32[1])) *var_cc26t++;
+ if(vec_all_ngt(var_vec_f32[0], var_vec_f32[1])) *var_cc26t++;
+ if(vec_all_nle(var_vec_f32[0], var_vec_f32[1])) *var_cc26tr++;
+ if(vec_all_nlt(var_vec_f32[0], var_vec_f32[1])) *var_cc26tr++;
+ if(vec_all_numeric(var_vec_f32[0])) *var_cc24td++;
+ if(vec_any_eq(var_vec_b16[0], var_vec_b16[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_b16[0], var_vec_s16[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_b16[0], var_vec_u16[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_b32[0], var_vec_b32[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_b32[0], var_vec_s32[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_b32[0], var_vec_u32[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_b8[0], var_vec_b8[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_b8[0], var_vec_s8[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_b8[0], var_vec_u8[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_f32[0], var_vec_f32[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_p16[0], var_vec_p16[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_s16[0], var_vec_b16[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_s16[0], var_vec_s16[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_s32[0], var_vec_b32[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_s32[0], var_vec_s32[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_s8[0], var_vec_b8[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_s8[0], var_vec_s8[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_u16[0], var_vec_b16[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_u16[0], var_vec_u16[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_u32[0], var_vec_b32[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_u32[0], var_vec_u32[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_u8[0], var_vec_b8[1])) *var_cc26f++;
+ if(vec_any_eq(var_vec_u8[0], var_vec_u8[1])) *var_cc26f++;
+ if(vec_any_ge(var_vec_b16[0], var_vec_s16[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_b16[0], var_vec_u16[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_b32[0], var_vec_s32[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_b32[0], var_vec_u32[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_b8[0], var_vec_s8[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_b8[0], var_vec_u8[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_f32[0], var_vec_f32[1])) *var_cc26f++;
+ if(vec_any_ge(var_vec_s16[0], var_vec_b16[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_s16[0], var_vec_s16[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_s32[0], var_vec_b32[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_s32[0], var_vec_s32[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_s8[0], var_vec_b8[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_s8[0], var_vec_s8[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_u16[0], var_vec_b16[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_u16[0], var_vec_u16[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_u32[0], var_vec_b32[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_u32[0], var_vec_u32[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_u8[0], var_vec_b8[1])) *var_cc24fr++;
+ if(vec_any_ge(var_vec_u8[0], var_vec_u8[1])) *var_cc24fr++;
+ if(vec_any_gt(var_vec_b16[0], var_vec_s16[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_b16[0], var_vec_u16[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_b32[0], var_vec_s32[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_b32[0], var_vec_u32[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_b8[0], var_vec_s8[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_b8[0], var_vec_u8[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_f32[0], var_vec_f32[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_s16[0], var_vec_b16[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_s16[0], var_vec_s16[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_s32[0], var_vec_b32[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_s32[0], var_vec_s32[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_s8[0], var_vec_b8[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_s8[0], var_vec_s8[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_u16[0], var_vec_b16[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_u16[0], var_vec_u16[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_u32[0], var_vec_b32[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_u32[0], var_vec_u32[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_u8[0], var_vec_b8[1])) *var_cc26f++;
+ if(vec_any_gt(var_vec_u8[0], var_vec_u8[1])) *var_cc26f++;
+ if(vec_any_le(var_vec_b16[0], var_vec_s16[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_b16[0], var_vec_u16[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_b32[0], var_vec_s32[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_b32[0], var_vec_u32[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_b8[0], var_vec_s8[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_b8[0], var_vec_u8[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_f32[0], var_vec_f32[1])) *var_cc26fr++;
+ if(vec_any_le(var_vec_s16[0], var_vec_b16[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_s16[0], var_vec_s16[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_s32[0], var_vec_b32[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_s32[0], var_vec_s32[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_s8[0], var_vec_b8[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_s8[0], var_vec_s8[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_u16[0], var_vec_b16[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_u16[0], var_vec_u16[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_u32[0], var_vec_b32[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_u32[0], var_vec_u32[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_u8[0], var_vec_b8[1])) *var_cc24f++;
+ if(vec_any_le(var_vec_u8[0], var_vec_u8[1])) *var_cc24f++;
+ if(vec_any_lt(var_vec_b16[0], var_vec_s16[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_b16[0], var_vec_u16[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_b32[0], var_vec_s32[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_b32[0], var_vec_u32[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_b8[0], var_vec_s8[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_b8[0], var_vec_u8[1])) *var_cc26fr++;
+}
+void f33() {
+ if(vec_any_lt(var_vec_f32[0], var_vec_f32[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_s16[0], var_vec_b16[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_s16[0], var_vec_s16[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_s32[0], var_vec_b32[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_s32[0], var_vec_s32[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_s8[0], var_vec_b8[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_s8[0], var_vec_s8[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_u16[0], var_vec_b16[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_u16[0], var_vec_u16[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_u32[0], var_vec_b32[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_u32[0], var_vec_u32[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_u8[0], var_vec_b8[1])) *var_cc26fr++;
+ if(vec_any_lt(var_vec_u8[0], var_vec_u8[1])) *var_cc26fr++;
+ if(vec_any_nan(var_vec_f32[0])) *var_cc24fd++;
+ if(vec_any_ne(var_vec_b16[0], var_vec_b16[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_b16[0], var_vec_s16[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_b16[0], var_vec_u16[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_b32[0], var_vec_b32[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_b32[0], var_vec_s32[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_b32[0], var_vec_u32[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_b8[0], var_vec_b8[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_b8[0], var_vec_s8[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_b8[0], var_vec_u8[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_f32[0], var_vec_f32[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_p16[0], var_vec_p16[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_s16[0], var_vec_b16[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_s16[0], var_vec_s16[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_s32[0], var_vec_b32[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_s32[0], var_vec_s32[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_s8[0], var_vec_b8[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_s8[0], var_vec_s8[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_u16[0], var_vec_b16[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_u16[0], var_vec_u16[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_u32[0], var_vec_b32[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_u32[0], var_vec_u32[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_u8[0], var_vec_b8[1])) *var_cc24f++;
+ if(vec_any_ne(var_vec_u8[0], var_vec_u8[1])) *var_cc24f++;
+ if(vec_any_nge(var_vec_f32[0], var_vec_f32[1])) *var_cc24f++;
+ if(vec_any_ngt(var_vec_f32[0], var_vec_f32[1])) *var_cc24f++;
+ if(vec_any_nle(var_vec_f32[0], var_vec_f32[1])) *var_cc24fr++;
+ if(vec_any_nlt(var_vec_f32[0], var_vec_f32[1])) *var_cc24fr++;
+ if(vec_any_numeric(var_vec_f32[0])) *var_cc26fd++;
+ if(vec_any_out(var_vec_f32[0], var_vec_f32[1])) *var_cc26f++;
+ vec_dss(0);
+ vec_dss(1);
+ vec_dss(2);
+ vec_dss(3);
+ vec_dssall();
+ vec_dst(var_float_ptr[0], var_int[1], 0);
+ vec_dst(var_float_ptr[0], var_int[1], 1);
+ vec_dst(var_float_ptr[0], var_int[1], 2);
+ vec_dst(var_float_ptr[0], var_int[1], 3);
+ vec_dst(var_int_ptr[0], var_int[1], 0);
+ vec_dst(var_int_ptr[0], var_int[1], 1);
+ vec_dst(var_int_ptr[0], var_int[1], 2);
+ vec_dst(var_int_ptr[0], var_int[1], 3);
+ vec_dst(var_short_ptr[0], var_int[1], 0);
+ vec_dst(var_short_ptr[0], var_int[1], 1);
+ vec_dst(var_short_ptr[0], var_int[1], 2);
+ vec_dst(var_short_ptr[0], var_int[1], 3);
+ vec_dst(var_signed_char_ptr[0], var_int[1], 0);
+ vec_dst(var_signed_char_ptr[0], var_int[1], 1);
+ vec_dst(var_signed_char_ptr[0], var_int[1], 2);
+ vec_dst(var_signed_char_ptr[0], var_int[1], 3);
+ vec_dst(var_unsigned_char_ptr[0], var_int[1], 0);
+ vec_dst(var_unsigned_char_ptr[0], var_int[1], 1);
+ vec_dst(var_unsigned_char_ptr[0], var_int[1], 2);
+ vec_dst(var_unsigned_char_ptr[0], var_int[1], 3);
+ vec_dst(var_unsigned_int_ptr[0], var_int[1], 0);
+ vec_dst(var_unsigned_int_ptr[0], var_int[1], 1);
+ vec_dst(var_unsigned_int_ptr[0], var_int[1], 2);
+ vec_dst(var_unsigned_int_ptr[0], var_int[1], 3);
+ vec_dst(var_unsigned_short_ptr[0], var_int[1], 0);
+ vec_dst(var_unsigned_short_ptr[0], var_int[1], 1);
+ vec_dst(var_unsigned_short_ptr[0], var_int[1], 2);
+ vec_dst(var_unsigned_short_ptr[0], var_int[1], 3);
+ vec_dst(var_vec_b16_ptr[0], var_int[1], 0);
+ vec_dst(var_vec_b16_ptr[0], var_int[1], 1);
+ vec_dst(var_vec_b16_ptr[0], var_int[1], 2);
+ vec_dst(var_vec_b16_ptr[0], var_int[1], 3);
+ vec_dst(var_vec_b32_ptr[0], var_int[1], 0);
+ vec_dst(var_vec_b32_ptr[0], var_int[1], 1);
+ vec_dst(var_vec_b32_ptr[0], var_int[1], 2);
+ vec_dst(var_vec_b32_ptr[0], var_int[1], 3);
+ vec_dst(var_vec_b8_ptr[0], var_int[1], 0);
+ vec_dst(var_vec_b8_ptr[0], var_int[1], 1);
+ vec_dst(var_vec_b8_ptr[0], var_int[1], 2);
+ vec_dst(var_vec_b8_ptr[0], var_int[1], 3);
+ vec_dst(var_vec_f32_ptr[0], var_int[1], 0);
+ vec_dst(var_vec_f32_ptr[0], var_int[1], 1);
+ vec_dst(var_vec_f32_ptr[0], var_int[1], 2);
+ vec_dst(var_vec_f32_ptr[0], var_int[1], 3);
+}
+void f34() {
+ vec_dst(var_vec_p16_ptr[0], var_int[1], 0);
+ vec_dst(var_vec_p16_ptr[0], var_int[1], 1);
+ vec_dst(var_vec_p16_ptr[0], var_int[1], 2);
+ vec_dst(var_vec_p16_ptr[0], var_int[1], 3);
+ vec_dst(var_vec_s16_ptr[0], var_int[1], 0);
+ vec_dst(var_vec_s16_ptr[0], var_int[1], 1);
+ vec_dst(var_vec_s16_ptr[0], var_int[1], 2);
+ vec_dst(var_vec_s16_ptr[0], var_int[1], 3);
+ vec_dst(var_vec_s32_ptr[0], var_int[1], 0);
+ vec_dst(var_vec_s32_ptr[0], var_int[1], 1);
+ vec_dst(var_vec_s32_ptr[0], var_int[1], 2);
+ vec_dst(var_vec_s32_ptr[0], var_int[1], 3);
+ vec_dst(var_vec_s8_ptr[0], var_int[1], 0);
+ vec_dst(var_vec_s8_ptr[0], var_int[1], 1);
+ vec_dst(var_vec_s8_ptr[0], var_int[1], 2);
+ vec_dst(var_vec_s8_ptr[0], var_int[1], 3);
+ vec_dst(var_vec_u16_ptr[0], var_int[1], 0);
+ vec_dst(var_vec_u16_ptr[0], var_int[1], 1);
+ vec_dst(var_vec_u16_ptr[0], var_int[1], 2);
+ vec_dst(var_vec_u16_ptr[0], var_int[1], 3);
+ vec_dst(var_vec_u32_ptr[0], var_int[1], 0);
+ vec_dst(var_vec_u32_ptr[0], var_int[1], 1);
+ vec_dst(var_vec_u32_ptr[0], var_int[1], 2);
+ vec_dst(var_vec_u32_ptr[0], var_int[1], 3);
+ vec_dst(var_vec_u8_ptr[0], var_int[1], 0);
+ vec_dst(var_vec_u8_ptr[0], var_int[1], 1);
+ vec_dst(var_vec_u8_ptr[0], var_int[1], 2);
+ vec_dst(var_vec_u8_ptr[0], var_int[1], 3);
+ vec_dstst(var_float_ptr[0], var_int[1], 0);
+ vec_dstst(var_float_ptr[0], var_int[1], 1);
+ vec_dstst(var_float_ptr[0], var_int[1], 2);
+ vec_dstst(var_float_ptr[0], var_int[1], 3);
+ vec_dstst(var_int_ptr[0], var_int[1], 0);
+ vec_dstst(var_int_ptr[0], var_int[1], 1);
+ vec_dstst(var_int_ptr[0], var_int[1], 2);
+ vec_dstst(var_int_ptr[0], var_int[1], 3);
+ vec_dstst(var_short_ptr[0], var_int[1], 0);
+ vec_dstst(var_short_ptr[0], var_int[1], 1);
+ vec_dstst(var_short_ptr[0], var_int[1], 2);
+ vec_dstst(var_short_ptr[0], var_int[1], 3);
+ vec_dstst(var_signed_char_ptr[0], var_int[1], 0);
+ vec_dstst(var_signed_char_ptr[0], var_int[1], 1);
+ vec_dstst(var_signed_char_ptr[0], var_int[1], 2);
+ vec_dstst(var_signed_char_ptr[0], var_int[1], 3);
+ vec_dstst(var_unsigned_char_ptr[0], var_int[1], 0);
+ vec_dstst(var_unsigned_char_ptr[0], var_int[1], 1);
+ vec_dstst(var_unsigned_char_ptr[0], var_int[1], 2);
+ vec_dstst(var_unsigned_char_ptr[0], var_int[1], 3);
+ vec_dstst(var_unsigned_int_ptr[0], var_int[1], 0);
+ vec_dstst(var_unsigned_int_ptr[0], var_int[1], 1);
+ vec_dstst(var_unsigned_int_ptr[0], var_int[1], 2);
+ vec_dstst(var_unsigned_int_ptr[0], var_int[1], 3);
+ vec_dstst(var_unsigned_short_ptr[0], var_int[1], 0);
+ vec_dstst(var_unsigned_short_ptr[0], var_int[1], 1);
+ vec_dstst(var_unsigned_short_ptr[0], var_int[1], 2);
+ vec_dstst(var_unsigned_short_ptr[0], var_int[1], 3);
+ vec_dstst(var_vec_b16_ptr[0], var_int[1], 0);
+ vec_dstst(var_vec_b16_ptr[0], var_int[1], 1);
+ vec_dstst(var_vec_b16_ptr[0], var_int[1], 2);
+ vec_dstst(var_vec_b16_ptr[0], var_int[1], 3);
+ vec_dstst(var_vec_b32_ptr[0], var_int[1], 0);
+ vec_dstst(var_vec_b32_ptr[0], var_int[1], 1);
+ vec_dstst(var_vec_b32_ptr[0], var_int[1], 2);
+ vec_dstst(var_vec_b32_ptr[0], var_int[1], 3);
+ vec_dstst(var_vec_b8_ptr[0], var_int[1], 0);
+ vec_dstst(var_vec_b8_ptr[0], var_int[1], 1);
+ vec_dstst(var_vec_b8_ptr[0], var_int[1], 2);
+ vec_dstst(var_vec_b8_ptr[0], var_int[1], 3);
+ vec_dstst(var_vec_f32_ptr[0], var_int[1], 0);
+ vec_dstst(var_vec_f32_ptr[0], var_int[1], 1);
+ vec_dstst(var_vec_f32_ptr[0], var_int[1], 2);
+ vec_dstst(var_vec_f32_ptr[0], var_int[1], 3);
+ vec_dstst(var_vec_p16_ptr[0], var_int[1], 0);
+ vec_dstst(var_vec_p16_ptr[0], var_int[1], 1);
+ vec_dstst(var_vec_p16_ptr[0], var_int[1], 2);
+ vec_dstst(var_vec_p16_ptr[0], var_int[1], 3);
+ vec_dstst(var_vec_s16_ptr[0], var_int[1], 0);
+ vec_dstst(var_vec_s16_ptr[0], var_int[1], 1);
+ vec_dstst(var_vec_s16_ptr[0], var_int[1], 2);
+ vec_dstst(var_vec_s16_ptr[0], var_int[1], 3);
+ vec_dstst(var_vec_s32_ptr[0], var_int[1], 0);
+ vec_dstst(var_vec_s32_ptr[0], var_int[1], 1);
+ vec_dstst(var_vec_s32_ptr[0], var_int[1], 2);
+ vec_dstst(var_vec_s32_ptr[0], var_int[1], 3);
+ vec_dstst(var_vec_s8_ptr[0], var_int[1], 0);
+ vec_dstst(var_vec_s8_ptr[0], var_int[1], 1);
+ vec_dstst(var_vec_s8_ptr[0], var_int[1], 2);
+ vec_dstst(var_vec_s8_ptr[0], var_int[1], 3);
+ vec_dstst(var_vec_u16_ptr[0], var_int[1], 0);
+ vec_dstst(var_vec_u16_ptr[0], var_int[1], 1);
+ vec_dstst(var_vec_u16_ptr[0], var_int[1], 2);
+ vec_dstst(var_vec_u16_ptr[0], var_int[1], 3);
+}
+void f35() {
+ vec_dstst(var_vec_u32_ptr[0], var_int[1], 0);
+ vec_dstst(var_vec_u32_ptr[0], var_int[1], 1);
+ vec_dstst(var_vec_u32_ptr[0], var_int[1], 2);
+ vec_dstst(var_vec_u32_ptr[0], var_int[1], 3);
+ vec_dstst(var_vec_u8_ptr[0], var_int[1], 0);
+ vec_dstst(var_vec_u8_ptr[0], var_int[1], 1);
+ vec_dstst(var_vec_u8_ptr[0], var_int[1], 2);
+ vec_dstst(var_vec_u8_ptr[0], var_int[1], 3);
+ vec_dststt(var_float_ptr[0], var_int[1], 0);
+ vec_dststt(var_float_ptr[0], var_int[1], 1);
+ vec_dststt(var_float_ptr[0], var_int[1], 2);
+ vec_dststt(var_float_ptr[0], var_int[1], 3);
+ vec_dststt(var_int_ptr[0], var_int[1], 0);
+ vec_dststt(var_int_ptr[0], var_int[1], 1);
+ vec_dststt(var_int_ptr[0], var_int[1], 2);
+ vec_dststt(var_int_ptr[0], var_int[1], 3);
+ vec_dststt(var_short_ptr[0], var_int[1], 0);
+ vec_dststt(var_short_ptr[0], var_int[1], 1);
+ vec_dststt(var_short_ptr[0], var_int[1], 2);
+ vec_dststt(var_short_ptr[0], var_int[1], 3);
+ vec_dststt(var_signed_char_ptr[0], var_int[1], 0);
+ vec_dststt(var_signed_char_ptr[0], var_int[1], 1);
+ vec_dststt(var_signed_char_ptr[0], var_int[1], 2);
+ vec_dststt(var_signed_char_ptr[0], var_int[1], 3);
+ vec_dststt(var_unsigned_char_ptr[0], var_int[1], 0);
+ vec_dststt(var_unsigned_char_ptr[0], var_int[1], 1);
+ vec_dststt(var_unsigned_char_ptr[0], var_int[1], 2);
+ vec_dststt(var_unsigned_char_ptr[0], var_int[1], 3);
+ vec_dststt(var_unsigned_int_ptr[0], var_int[1], 0);
+ vec_dststt(var_unsigned_int_ptr[0], var_int[1], 1);
+ vec_dststt(var_unsigned_int_ptr[0], var_int[1], 2);
+ vec_dststt(var_unsigned_int_ptr[0], var_int[1], 3);
+ vec_dststt(var_unsigned_short_ptr[0], var_int[1], 0);
+ vec_dststt(var_unsigned_short_ptr[0], var_int[1], 1);
+ vec_dststt(var_unsigned_short_ptr[0], var_int[1], 2);
+ vec_dststt(var_unsigned_short_ptr[0], var_int[1], 3);
+ vec_dststt(var_vec_b16_ptr[0], var_int[1], 0);
+ vec_dststt(var_vec_b16_ptr[0], var_int[1], 1);
+ vec_dststt(var_vec_b16_ptr[0], var_int[1], 2);
+ vec_dststt(var_vec_b16_ptr[0], var_int[1], 3);
+ vec_dststt(var_vec_b32_ptr[0], var_int[1], 0);
+ vec_dststt(var_vec_b32_ptr[0], var_int[1], 1);
+ vec_dststt(var_vec_b32_ptr[0], var_int[1], 2);
+ vec_dststt(var_vec_b32_ptr[0], var_int[1], 3);
+ vec_dststt(var_vec_b8_ptr[0], var_int[1], 0);
+ vec_dststt(var_vec_b8_ptr[0], var_int[1], 1);
+ vec_dststt(var_vec_b8_ptr[0], var_int[1], 2);
+ vec_dststt(var_vec_b8_ptr[0], var_int[1], 3);
+ vec_dststt(var_vec_f32_ptr[0], var_int[1], 0);
+ vec_dststt(var_vec_f32_ptr[0], var_int[1], 1);
+ vec_dststt(var_vec_f32_ptr[0], var_int[1], 2);
+ vec_dststt(var_vec_f32_ptr[0], var_int[1], 3);
+ vec_dststt(var_vec_p16_ptr[0], var_int[1], 0);
+ vec_dststt(var_vec_p16_ptr[0], var_int[1], 1);
+ vec_dststt(var_vec_p16_ptr[0], var_int[1], 2);
+ vec_dststt(var_vec_p16_ptr[0], var_int[1], 3);
+ vec_dststt(var_vec_s16_ptr[0], var_int[1], 0);
+ vec_dststt(var_vec_s16_ptr[0], var_int[1], 1);
+ vec_dststt(var_vec_s16_ptr[0], var_int[1], 2);
+ vec_dststt(var_vec_s16_ptr[0], var_int[1], 3);
+ vec_dststt(var_vec_s32_ptr[0], var_int[1], 0);
+ vec_dststt(var_vec_s32_ptr[0], var_int[1], 1);
+ vec_dststt(var_vec_s32_ptr[0], var_int[1], 2);
+ vec_dststt(var_vec_s32_ptr[0], var_int[1], 3);
+ vec_dststt(var_vec_s8_ptr[0], var_int[1], 0);
+ vec_dststt(var_vec_s8_ptr[0], var_int[1], 1);
+ vec_dststt(var_vec_s8_ptr[0], var_int[1], 2);
+ vec_dststt(var_vec_s8_ptr[0], var_int[1], 3);
+ vec_dststt(var_vec_u16_ptr[0], var_int[1], 0);
+ vec_dststt(var_vec_u16_ptr[0], var_int[1], 1);
+ vec_dststt(var_vec_u16_ptr[0], var_int[1], 2);
+ vec_dststt(var_vec_u16_ptr[0], var_int[1], 3);
+ vec_dststt(var_vec_u32_ptr[0], var_int[1], 0);
+ vec_dststt(var_vec_u32_ptr[0], var_int[1], 1);
+ vec_dststt(var_vec_u32_ptr[0], var_int[1], 2);
+ vec_dststt(var_vec_u32_ptr[0], var_int[1], 3);
+ vec_dststt(var_vec_u8_ptr[0], var_int[1], 0);
+ vec_dststt(var_vec_u8_ptr[0], var_int[1], 1);
+ vec_dststt(var_vec_u8_ptr[0], var_int[1], 2);
+ vec_dststt(var_vec_u8_ptr[0], var_int[1], 3);
+ vec_dstt(var_float_ptr[0], var_int[1], 0);
+ vec_dstt(var_float_ptr[0], var_int[1], 1);
+ vec_dstt(var_float_ptr[0], var_int[1], 2);
+ vec_dstt(var_float_ptr[0], var_int[1], 3);
+ vec_dstt(var_int_ptr[0], var_int[1], 0);
+ vec_dstt(var_int_ptr[0], var_int[1], 1);
+ vec_dstt(var_int_ptr[0], var_int[1], 2);
+ vec_dstt(var_int_ptr[0], var_int[1], 3);
+}
+void f36() {
+ vec_dstt(var_short_ptr[0], var_int[1], 0);
+ vec_dstt(var_short_ptr[0], var_int[1], 1);
+ vec_dstt(var_short_ptr[0], var_int[1], 2);
+ vec_dstt(var_short_ptr[0], var_int[1], 3);
+ vec_dstt(var_signed_char_ptr[0], var_int[1], 0);
+ vec_dstt(var_signed_char_ptr[0], var_int[1], 1);
+ vec_dstt(var_signed_char_ptr[0], var_int[1], 2);
+ vec_dstt(var_signed_char_ptr[0], var_int[1], 3);
+ vec_dstt(var_unsigned_char_ptr[0], var_int[1], 0);
+ vec_dstt(var_unsigned_char_ptr[0], var_int[1], 1);
+ vec_dstt(var_unsigned_char_ptr[0], var_int[1], 2);
+ vec_dstt(var_unsigned_char_ptr[0], var_int[1], 3);
+ vec_dstt(var_unsigned_int_ptr[0], var_int[1], 0);
+ vec_dstt(var_unsigned_int_ptr[0], var_int[1], 1);
+ vec_dstt(var_unsigned_int_ptr[0], var_int[1], 2);
+ vec_dstt(var_unsigned_int_ptr[0], var_int[1], 3);
+ vec_dstt(var_unsigned_short_ptr[0], var_int[1], 0);
+ vec_dstt(var_unsigned_short_ptr[0], var_int[1], 1);
+ vec_dstt(var_unsigned_short_ptr[0], var_int[1], 2);
+ vec_dstt(var_unsigned_short_ptr[0], var_int[1], 3);
+ vec_dstt(var_vec_b16_ptr[0], var_int[1], 0);
+ vec_dstt(var_vec_b16_ptr[0], var_int[1], 1);
+ vec_dstt(var_vec_b16_ptr[0], var_int[1], 2);
+ vec_dstt(var_vec_b16_ptr[0], var_int[1], 3);
+ vec_dstt(var_vec_b32_ptr[0], var_int[1], 0);
+ vec_dstt(var_vec_b32_ptr[0], var_int[1], 1);
+ vec_dstt(var_vec_b32_ptr[0], var_int[1], 2);
+ vec_dstt(var_vec_b32_ptr[0], var_int[1], 3);
+ vec_dstt(var_vec_b8_ptr[0], var_int[1], 0);
+ vec_dstt(var_vec_b8_ptr[0], var_int[1], 1);
+ vec_dstt(var_vec_b8_ptr[0], var_int[1], 2);
+ vec_dstt(var_vec_b8_ptr[0], var_int[1], 3);
+ vec_dstt(var_vec_f32_ptr[0], var_int[1], 0);
+ vec_dstt(var_vec_f32_ptr[0], var_int[1], 1);
+ vec_dstt(var_vec_f32_ptr[0], var_int[1], 2);
+ vec_dstt(var_vec_f32_ptr[0], var_int[1], 3);
+ vec_dstt(var_vec_p16_ptr[0], var_int[1], 0);
+ vec_dstt(var_vec_p16_ptr[0], var_int[1], 1);
+ vec_dstt(var_vec_p16_ptr[0], var_int[1], 2);
+ vec_dstt(var_vec_p16_ptr[0], var_int[1], 3);
+ vec_dstt(var_vec_s16_ptr[0], var_int[1], 0);
+ vec_dstt(var_vec_s16_ptr[0], var_int[1], 1);
+ vec_dstt(var_vec_s16_ptr[0], var_int[1], 2);
+ vec_dstt(var_vec_s16_ptr[0], var_int[1], 3);
+ vec_dstt(var_vec_s32_ptr[0], var_int[1], 0);
+ vec_dstt(var_vec_s32_ptr[0], var_int[1], 1);
+ vec_dstt(var_vec_s32_ptr[0], var_int[1], 2);
+ vec_dstt(var_vec_s32_ptr[0], var_int[1], 3);
+ vec_dstt(var_vec_s8_ptr[0], var_int[1], 0);
+ vec_dstt(var_vec_s8_ptr[0], var_int[1], 1);
+ vec_dstt(var_vec_s8_ptr[0], var_int[1], 2);
+ vec_dstt(var_vec_s8_ptr[0], var_int[1], 3);
+ vec_dstt(var_vec_u16_ptr[0], var_int[1], 0);
+ vec_dstt(var_vec_u16_ptr[0], var_int[1], 1);
+ vec_dstt(var_vec_u16_ptr[0], var_int[1], 2);
+ vec_dstt(var_vec_u16_ptr[0], var_int[1], 3);
+ vec_dstt(var_vec_u32_ptr[0], var_int[1], 0);
+ vec_dstt(var_vec_u32_ptr[0], var_int[1], 1);
+ vec_dstt(var_vec_u32_ptr[0], var_int[1], 2);
+ vec_dstt(var_vec_u32_ptr[0], var_int[1], 3);
+ vec_dstt(var_vec_u8_ptr[0], var_int[1], 0);
+ vec_dstt(var_vec_u8_ptr[0], var_int[1], 1);
+ vec_dstt(var_vec_u8_ptr[0], var_int[1], 2);
+ vec_dstt(var_vec_u8_ptr[0], var_int[1], 3);
+ vec_mtvscr(var_vec_b16[0]);
+ vec_mtvscr(var_vec_b32[0]);
+ vec_mtvscr(var_vec_b8[0]);
+ vec_mtvscr(var_vec_p16[0]);
+ vec_mtvscr(var_vec_s16[0]);
+ vec_mtvscr(var_vec_s32[0]);
+ vec_mtvscr(var_vec_s8[0]);
+ vec_mtvscr(var_vec_u16[0]);
+ vec_mtvscr(var_vec_u32[0]);
+ vec_mtvscr(var_vec_u8[0]);
+ vec_st(var_vec_b16[0], var_int[1], var_vec_b16_ptr[2]);
+ vec_st(var_vec_b32[0], var_int[1], var_vec_b32_ptr[2]);
+ vec_st(var_vec_b8[0], var_int[1], var_vec_b8_ptr[2]);
+ vec_st(var_vec_f32[0], var_int[1], var_float_ptr[2]);
+ vec_st(var_vec_f32[0], var_int[1], var_vec_f32_ptr[2]);
+ vec_st(var_vec_p16[0], var_int[1], var_vec_p16_ptr[2]);
+ vec_st(var_vec_s16[0], var_int[1], var_short_ptr[2]);
+ vec_st(var_vec_s16[0], var_int[1], var_vec_s16_ptr[2]);
+ vec_st(var_vec_s32[0], var_int[1], var_int_ptr[2]);
+ vec_st(var_vec_s32[0], var_int[1], var_vec_s32_ptr[2]);
+ vec_st(var_vec_s8[0], var_int[1], var_signed_char_ptr[2]);
+ vec_st(var_vec_s8[0], var_int[1], var_vec_s8_ptr[2]);
+ vec_st(var_vec_u16[0], var_int[1], var_unsigned_short_ptr[2]);
+ vec_st(var_vec_u16[0], var_int[1], var_vec_u16_ptr[2]);
+ vec_st(var_vec_u32[0], var_int[1], var_unsigned_int_ptr[2]);
+ vec_st(var_vec_u32[0], var_int[1], var_vec_u32_ptr[2]);
+ vec_st(var_vec_u8[0], var_int[1], var_unsigned_char_ptr[2]);
+ vec_st(var_vec_u8[0], var_int[1], var_vec_u8_ptr[2]);
+ vec_ste(var_vec_f32[0], var_int[1], var_float_ptr[2]);
+ vec_ste(var_vec_s16[0], var_int[1], var_short_ptr[2]);
+}
+void f37() {
+ vec_ste(var_vec_s32[0], var_int[1], var_int_ptr[2]);
+ vec_ste(var_vec_s8[0], var_int[1], var_signed_char_ptr[2]);
+ vec_ste(var_vec_u16[0], var_int[1], var_unsigned_short_ptr[2]);
+ vec_ste(var_vec_u32[0], var_int[1], var_unsigned_int_ptr[2]);
+ vec_ste(var_vec_u8[0], var_int[1], var_unsigned_char_ptr[2]);
+ vec_stl(var_vec_b16[0], var_int[1], var_vec_b16_ptr[2]);
+ vec_stl(var_vec_b32[0], var_int[1], var_vec_b32_ptr[2]);
+ vec_stl(var_vec_b8[0], var_int[1], var_vec_b8_ptr[2]);
+ vec_stl(var_vec_f32[0], var_int[1], var_float_ptr[2]);
+ vec_stl(var_vec_f32[0], var_int[1], var_vec_f32_ptr[2]);
+ vec_stl(var_vec_p16[0], var_int[1], var_vec_p16_ptr[2]);
+ vec_stl(var_vec_s16[0], var_int[1], var_short_ptr[2]);
+ vec_stl(var_vec_s16[0], var_int[1], var_vec_s16_ptr[2]);
+ vec_stl(var_vec_s32[0], var_int[1], var_int_ptr[2]);
+ vec_stl(var_vec_s32[0], var_int[1], var_vec_s32_ptr[2]);
+ vec_stl(var_vec_s8[0], var_int[1], var_signed_char_ptr[2]);
+ vec_stl(var_vec_s8[0], var_int[1], var_vec_s8_ptr[2]);
+ vec_stl(var_vec_u16[0], var_int[1], var_unsigned_short_ptr[2]);
+ vec_stl(var_vec_u16[0], var_int[1], var_vec_u16_ptr[2]);
+ vec_stl(var_vec_u32[0], var_int[1], var_unsigned_int_ptr[2]);
+ vec_stl(var_vec_u32[0], var_int[1], var_vec_u32_ptr[2]);
+ vec_stl(var_vec_u8[0], var_int[1], var_unsigned_char_ptr[2]);
+ vec_stl(var_vec_u8[0], var_int[1], var_vec_u8_ptr[2]);
+ vec_stvebx(var_vec_s8[0], var_int[1], var_signed_char_ptr[2]);
+ vec_stvebx(var_vec_u8[0], var_int[1], var_unsigned_char_ptr[2]);
+ vec_stvehx(var_vec_s16[0], var_int[1], var_short_ptr[2]);
+ vec_stvehx(var_vec_u16[0], var_int[1], var_unsigned_short_ptr[2]);
+ vec_stvewx(var_vec_f32[0], var_int[1], var_float_ptr[2]);
+ vec_stvewx(var_vec_s32[0], var_int[1], var_int_ptr[2]);
+ vec_stvewx(var_vec_u32[0], var_int[1], var_unsigned_int_ptr[2]);
+ vec_stvx(var_vec_b16[0], var_int[1], var_vec_b16_ptr[2]);
+ vec_stvx(var_vec_b32[0], var_int[1], var_vec_b32_ptr[2]);
+ vec_stvx(var_vec_b8[0], var_int[1], var_vec_b8_ptr[2]);
+ vec_stvx(var_vec_f32[0], var_int[1], var_float_ptr[2]);
+ vec_stvx(var_vec_f32[0], var_int[1], var_vec_f32_ptr[2]);
+ vec_stvx(var_vec_p16[0], var_int[1], var_vec_p16_ptr[2]);
+ vec_stvx(var_vec_s16[0], var_int[1], var_short_ptr[2]);
+ vec_stvx(var_vec_s16[0], var_int[1], var_vec_s16_ptr[2]);
+ vec_stvx(var_vec_s32[0], var_int[1], var_int_ptr[2]);
+ vec_stvx(var_vec_s32[0], var_int[1], var_vec_s32_ptr[2]);
+ vec_stvx(var_vec_s8[0], var_int[1], var_signed_char_ptr[2]);
+ vec_stvx(var_vec_s8[0], var_int[1], var_vec_s8_ptr[2]);
+ vec_stvx(var_vec_u16[0], var_int[1], var_unsigned_short_ptr[2]);
+ vec_stvx(var_vec_u16[0], var_int[1], var_vec_u16_ptr[2]);
+ vec_stvx(var_vec_u32[0], var_int[1], var_unsigned_int_ptr[2]);
+ vec_stvx(var_vec_u32[0], var_int[1], var_vec_u32_ptr[2]);
+ vec_stvx(var_vec_u8[0], var_int[1], var_unsigned_char_ptr[2]);
+ vec_stvx(var_vec_u8[0], var_int[1], var_vec_u8_ptr[2]);
+ vec_stvxl(var_vec_b16[0], var_int[1], var_vec_b16_ptr[2]);
+ vec_stvxl(var_vec_b32[0], var_int[1], var_vec_b32_ptr[2]);
+ vec_stvxl(var_vec_b8[0], var_int[1], var_vec_b8_ptr[2]);
+ vec_stvxl(var_vec_f32[0], var_int[1], var_float_ptr[2]);
+ vec_stvxl(var_vec_f32[0], var_int[1], var_vec_f32_ptr[2]);
+ vec_stvxl(var_vec_p16[0], var_int[1], var_vec_p16_ptr[2]);
+ vec_stvxl(var_vec_s16[0], var_int[1], var_short_ptr[2]);
+ vec_stvxl(var_vec_s16[0], var_int[1], var_vec_s16_ptr[2]);
+ vec_stvxl(var_vec_s32[0], var_int[1], var_int_ptr[2]);
+ vec_stvxl(var_vec_s32[0], var_int[1], var_vec_s32_ptr[2]);
+ vec_stvxl(var_vec_s8[0], var_int[1], var_signed_char_ptr[2]);
+ vec_stvxl(var_vec_s8[0], var_int[1], var_vec_s8_ptr[2]);
+ vec_stvxl(var_vec_u16[0], var_int[1], var_unsigned_short_ptr[2]);
+ vec_stvxl(var_vec_u16[0], var_int[1], var_vec_u16_ptr[2]);
+ vec_stvxl(var_vec_u32[0], var_int[1], var_unsigned_int_ptr[2]);
+ vec_stvxl(var_vec_u32[0], var_int[1], var_vec_u32_ptr[2]);
+ vec_stvxl(var_vec_u8[0], var_int[1], var_unsigned_char_ptr[2]);
+ vec_stvxl(var_vec_u8[0], var_int[1], var_vec_u8_ptr[2]);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/pr27006.c b/gcc/testsuite/gcc.dg/vmx/pr27006.c
new file mode 100644
index 0000000000..d34d51e92c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/pr27006.c
@@ -0,0 +1,22 @@
+/* { dg-options "-maltivec" } */
+
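+/* PR 27006: initializing the vector member of the union must make the
+   elements visible through the overlapping int array member.  */
+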
+extern void abort ();
+
+typedef union
+{
+ int i[4];
+ __attribute__((altivec(vector__))) int v;
+} vec_int4;
+
+int main (void)
+{
+ vec_int4 i1;
+
+ i1.v = (__attribute__((altivec(vector__))) int){31, 31, 31, 31};
+
+ if (i1.i[0] != 31)
+ abort ();
+
+ return 0;
+}
+
diff --git a/gcc/testsuite/gcc.dg/vmx/pr27842.c b/gcc/testsuite/gcc.dg/vmx/pr27842.c
new file mode 100644
index 0000000000..ad3130d007
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/pr27842.c
@@ -0,0 +1,26 @@
+#include <altivec.h>
+
+extern void abort (void);
+extern int memcmp (const void *, const void *, __SIZE_TYPE__);
+
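+/* Regression test for PR 27842: applying vec_abs in a loop to an
+   already-nonnegative vector must leave it bit-for-bit unchanged.  */
+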
+void test (vector float *p, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ p[i] = vec_abs (p[i]);
+}
+
+int
+main (void)
+{
+ vector float p = (vector float){ 0.5, 0.5, 0.5, 0.5 };
+ vector float q = p;
+
+ test (&p, 1);
+
+ if (memcmp (&p, &q, sizeof (p)))
+ abort ();
+
+ return 0;
+}
+
diff --git a/gcc/testsuite/gcc.dg/vmx/sn7153.c b/gcc/testsuite/gcc.dg/vmx/sn7153.c
new file mode 100644
index 0000000000..a498a86200
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/sn7153.c
@@ -0,0 +1,62 @@
+/* In the source code, the vec_adds appears before the call to
+ validate_sat(). In the .s code, the vaddubs has been moved to after
+ the call to validate_sat(). This invalidates the meaning of checking
+ the saturation bit. */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <altivec.h>
+
+static int failed;
+
+void validate_sat();
+void validate_u8(vector unsigned char, vector unsigned char);
+
+int
+main()
+{
+ vector unsigned char result_u8;
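+ /* Zero the VSCR, clearing its SAT bit, before the saturating add.  */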
+ vec_mtvscr(((vector unsigned short){0,0,0,0,0,0,0,0}));
+ result_u8 = vec_adds(((vector unsigned char){0,1,2,3,0xfc,0xfd,0xfe,0xff,
+                                              0,1,2,3,0xfc,0xfd,0xfe,0xff}),
+                      ((vector unsigned char){0,0xf0,0xfd,0xfd,2,2,2,2,
+                                              0,0xf0,0xfd,0xfd,2,2,2,2}));
+ validate_sat();
+ validate_u8(result_u8,
+             ((vector unsigned char){0,0xf1,0xff,0xff,0xfe,0xff,0xff,0xff,
+                                     0,0xf1,0xff,0xff,0xfe,0xff,0xff,0xff}));
+ if (failed)
+ abort ();
+ return 0;
+}
+
+void validate_sat()
+{
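+ /* vec_mfvscr returns the VSCR in element 7 of a vector unsigned short;
+    its low-order bit is SAT, which the saturating vec_adds must have set.  */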
+ if (vec_any_ne(vec_splat(vec_mfvscr(), 7), ((vector unsigned short){1,1,1,1,1,1,1,1})))
+ {
+ union {vector unsigned short v; unsigned short s[8];} u;
+ u.v = vec_mfvscr();
+ printf("error: vscr == { %d,%d,%d,%d,%d,%d,%d,%d }",
+ u.s[0], u.s[1], u.s[2], u.s[3],
+ u.s[4], u.s[5], u.s[6], u.s[7]);
+ printf("expected { 1,1,1,1,1,1,1,1 }\n");
+ failed++;
+ }
+}
+
+void validate_u8(vector unsigned char v, vector unsigned char vx)
+{
+ union {vector unsigned char v; unsigned char x[16]; } u, ux;
+ int i;
+ u.v = v;
+ ux.v = vx;
+ for (i=0; i<16; i++) {
+ if (u.x[i] != ux.x[i]) {
+ printf(" error: field %d %#2.2x expected %#2.2x\n",
+ i, u.x[i], ux.x[i]);
+ failed++;
+ }
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/spill.c b/gcc/testsuite/gcc.dg/vmx/spill.c
new file mode 100644
index 0000000000..dad489c347
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/spill.c
@@ -0,0 +1,131 @@
+#include <altivec.h>
+extern vector unsigned char u8(void);
+extern vector signed char s8(void);
+extern vector bool char b8(void);
+extern vector unsigned short u16(void);
+extern vector signed short s16(void);
+extern vector bool short b16(void);
+extern vector unsigned int u32(void);
+extern vector signed int s32(void);
+extern vector bool int b32(void);
+extern vector float f32(void);
+extern vector pixel p16(void);
+
+extern void g(vector unsigned char, ...);
+
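+/* Keep two live copies of every vector type across the call to g, forcing
+   the register allocator to spill and reload vector registers.  */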
+void f()
+{
+ vector unsigned char u8l = u8();
+ vector signed char s8l = s8();
+ vector bool char b8l = b8();
+ vector unsigned short u16l = u16();
+ vector signed short s16l = s16();
+ vector bool short b16l = b16();
+ vector unsigned int u32l = u32();
+ vector signed int s32l = s32();
+ vector bool int b32l = b32();
+ vector float f32l = f32();
+ vector pixel p16l = p16();
+ vector unsigned char u8lx = u8();
+ vector signed char s8lx = s8();
+ vector bool char b8lx = b8();
+ vector unsigned short u16lx = u16();
+ vector signed short s16lx = s16();
+ vector bool short b16lx = b16();
+ vector unsigned int u32lx = u32();
+ vector signed int s32lx = s32();
+ vector bool int b32lx = b32();
+ vector float f32lx = f32();
+ vector pixel p16lx = p16();
+
+ g(u8l, s8l, b8l, u16l, s16l, b16l, u32l, s32l, b32l, f32l, p16l,
+ u8lx, s8lx, b8lx, u16lx, s16lx, b16lx, u32lx, s32lx, b32lx, f32lx, p16lx);
+}
+
+vector unsigned char
+u8(void)
+{
+ static vector unsigned char zero;
+ return zero;
+}
+
+vector signed char
+s8(void)
+{
+ static vector signed char zero;
+ return zero;
+}
+
+vector bool char
+b8(void)
+{
+ static vector bool char zero;
+ return zero;
+}
+
+vector unsigned short
+u16(void)
+{
+ static vector unsigned short zero;
+ return zero;
+}
+
+vector signed short
+s16(void)
+{
+ static vector signed short zero;
+ return zero;
+}
+
+vector bool short
+b16(void)
+{
+ static vector bool short zero;
+ return zero;
+}
+
+vector unsigned int
+u32(void)
+{
+ static vector unsigned int zero;
+ return zero;
+}
+
+vector signed int
+s32(void)
+{
+ static vector signed int zero;
+ return zero;
+}
+
+vector bool int
+b32(void)
+{
+ static vector bool int zero;
+ return zero;
+}
+
+vector float
+f32(void)
+{
+ static vector float zero;
+ return zero;
+}
+
+vector pixel
+p16(void)
+{
+ static vector pixel zero;
+ return zero;
+}
+
+void
+g(vector unsigned char a, ...)
+{
+}
+
+int main()
+{
+ f();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/spill2.c b/gcc/testsuite/gcc.dg/vmx/spill2.c
new file mode 100644
index 0000000000..4c7337bd08
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/spill2.c
@@ -0,0 +1,155 @@
+#include <altivec.h>
+extern vector unsigned char u8(void);
+extern vector signed char s8(void);
+extern vector bool char b8(void);
+extern vector unsigned short u16(void);
+extern vector signed short s16(void);
+extern vector bool short b16(void);
+extern vector unsigned int u32(void);
+extern vector signed int s32(void);
+extern vector bool int b32(void);
+extern vector float f32(void);
+extern vector pixel p16(void);
+extern double d(void);
+extern int i(void);
+
+extern void g(vector unsigned char, ...);
+
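+/* Like spill.c, but interleaves live int and double values with the
+   vectors so spills are needed in several register classes at once.  */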
+void f()
+{
+ int i1l = i();
+ vector unsigned char u8l = u8();
+ vector signed char s8l = s8();
+ vector bool char b8l = b8();
+ int i2l = i();
+ vector unsigned short u16l = u16();
+ vector signed short s16l = s16();
+ vector bool short b16l = b16();
+ int i3l = i();
+ vector unsigned int u32l = u32();
+ vector signed int s32l = s32();
+ vector bool int b32l = b32();
+ double d1l = d();
+ vector float f32l = f32();
+ vector pixel p16l = p16();
+ double d2l = d();
+ vector unsigned char u8lx = u8();
+ vector signed char s8lx = s8();
+ vector bool char b8lx = b8();
+ vector unsigned short u16lx = u16();
+ vector signed short s16lx = s16();
+ vector bool short b16lx = b16();
+ vector unsigned int u32lx = u32();
+ vector signed int s32lx = s32();
+ vector bool int b32lx = b32();
+ vector float f32lx = f32();
+ vector pixel p16lx = p16();
+
+ if (i1l)
+ g(u8l, s8l, b8l, u16l, s16l, b16l, u32l, s32l, b32l, f32l, p16l,
+ u8lx, s8lx, b8lx, u16lx, s16lx, b16lx, u32lx, s32lx, b32lx, f32lx, p16lx,
+ i1l, i2l, i3l, d1l, d2l);
+ g(u8l, i1l, i2l, i3l, d1l, d2l);
+}
+
+double
+d(void)
+{
+ static double zero;
+ return zero;
+}
+
+int
+i(void)
+{
+ static int non_zero;
+ return ++non_zero;
+}
+
+vector unsigned char
+u8(void)
+{
+ static vector unsigned char zero;
+ return zero;
+}
+
+vector signed char
+s8(void)
+{
+ static vector signed char zero;
+ return zero;
+}
+
+vector bool char
+b8(void)
+{
+ static vector bool char zero;
+ return zero;
+}
+
+vector unsigned short
+u16(void)
+{
+ static vector unsigned short zero;
+ return zero;
+}
+
+vector signed short
+s16(void)
+{
+ static vector signed short zero;
+ return zero;
+}
+
+vector bool short
+b16(void)
+{
+ static vector bool short zero;
+ return zero;
+}
+
+vector unsigned int
+u32(void)
+{
+ static vector unsigned int zero;
+ return zero;
+}
+
+vector signed int
+s32(void)
+{
+ static vector signed int zero;
+ return zero;
+}
+
+vector bool int
+b32(void)
+{
+ static vector bool int zero;
+ return zero;
+}
+
+vector float
+f32(void)
+{
+ static vector float zero;
+ return zero;
+}
+
+vector pixel
+p16(void)
+{
+ static vector pixel zero;
+ return zero;
+}
+
+void
+g(vector unsigned char a, ...)
+{
+}
+
+int main()
+{
+ f();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/spill3.c b/gcc/testsuite/gcc.dg/vmx/spill3.c
new file mode 100644
index 0000000000..9f1c45c65b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/spill3.c
@@ -0,0 +1,156 @@
+#include <altivec.h>
+extern void g(vector unsigned char, ...);
+extern vector unsigned char v(void);
+extern double d(void);
+extern int i(void);
+
+static vector unsigned char v1l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static vector unsigned char v2l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static vector unsigned char v3l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static vector unsigned char v4l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static vector unsigned char v5l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static vector unsigned char v6l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static vector unsigned char v7l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static vector unsigned char v8l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static vector unsigned char v9l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static vector unsigned char v10l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static vector unsigned char v11l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static vector unsigned char v12l = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+static double d1l = 0;
+static double d2l = 0;
+static double d3l = 0;
+static double d4l = 0;
+static double d5l = 0;
+static double d6l = 0;
+static double d7l = 0;
+static double d8l = 0;
+static double d9l = 0;
+static double d10l = 0;
+static double d11l = 0;
+static double d12l = 0;
+static double d13l = 0;
+static double d14l = 0;
+static double d15l = 0;
+static double d16l = 0;
+static double d17l = 0;
+static double d18l = 0;
+static int i1l = 0;
+static int i2l = 0;
+static int i3l = 0;
+static int i4l = 0;
+static int i5l = 0;
+static int i6l = 0;
+static int i7l = 0;
+static int i8l = 0;
+static int i9l = 0;
+static int i10l = 0;
+static int i11l = 0;
+static int i12l = 0;
+static int i13l = 0;
+static int i14l = 0;
+static int i15l = 0;
+static int i16l = 0;
+static int i17l = 0;
+static int i18l = 0;
+static int i19l = 0;
+
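+/* Like spill2.c, but keeps 12 vectors, 18 doubles, and 19 ints live
+   across the calls to g, spilling all three register classes.  */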
+void f()
+{
+ char buffer[23];
+ vector unsigned char v1l = v();
+ vector unsigned char v2l = v();
+ vector unsigned char v3l = v();
+ vector unsigned char v4l = v();
+ vector unsigned char v5l = v();
+ vector unsigned char v6l = v();
+ vector unsigned char v7l = v();
+ vector unsigned char v8l = v();
+ vector unsigned char v9l = v();
+ vector unsigned char v10l = v();
+ vector unsigned char v11l = v();
+ vector unsigned char v12l = v();
+
+ double d1l = d();
+ double d2l = d();
+ double d3l = d();
+ double d4l = d();
+ double d5l = d();
+ double d6l = d();
+ double d7l = d();
+ double d8l = d();
+ double d9l = d();
+ double d10l = d();
+ double d11l = d();
+ double d12l = d();
+ double d13l = d();
+ double d14l = d();
+ double d15l = d();
+ double d16l = d();
+ double d17l = d();
+ double d18l = d();
+
+ int i1l = i();
+ int i2l = i();
+ int i3l = i();
+ int i4l = i();
+ int i5l = i();
+ int i6l = i();
+ int i7l = i();
+ int i8l = i();
+ int i9l = i();
+ int i10l = i();
+ int i11l = i();
+ int i12l = i();
+ int i13l = i();
+ int i14l = i();
+ int i15l = i();
+ int i16l = i();
+ int i17l = i();
+ int i18l = i();
+ int i19l = i();
+
+ if (d1l)
+ g(v1l, v2l, v3l, v4l, v5l, v6l, v7l, v8l, v9l, v10l, v11l, v12l,
+ d1l, d2l, d3l, d4l, d5l, d6l, d7l, d8l, d9l, d10l, d11l, d12l,
+ d13l, d14l, d15l, d16l, d17l, d18l,
+ i1l, i2l, i3l, i4l, i5l, i6l, i7l, i8l, i9l, i10l, i11l, i12l,
+ i13l, i14l, i15l, i16l, i17l, i18l, i19l);
+
+ g(v1l, buffer,
+ d1l, d2l, d3l, d4l, d5l, d6l, d7l, d8l, d9l, d10l, d11l, d12l,
+ d13l, d14l, d15l, d16l, d17l, d18l,
+ i1l, i2l, i3l, i4l, i5l, i6l, i7l, i8l, i9l, i10l, i11l, i12l,
+ i13l, i14l, i15l, i16l, i17l, i18l, i19l);
+}
+
+double
+d(void)
+{
+ static double zero;
+ return zero;
+}
+
+int
+i(void)
+{
+ static int non_zero;
+ return ++non_zero;
+}
+
+vector unsigned char
+v(void)
+{
+ static vector unsigned char zero;
+ return zero;
+}
+
+void
+g(vector unsigned char a, ...)
+{
+}
+
+int main()
+{
+ f();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/t.c b/gcc/testsuite/gcc.dg/vmx/t.c
new file mode 100644
index 0000000000..3a7d5a94ae
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/t.c
@@ -0,0 +1,43 @@
+/* { dg-do compile } */
+#include <altivec.h>
+typedef unsigned long size_t;
+vector signed int T_vec_s32;
+void *T_void_ptr;
+const void *T_const_void_ptr;
+size_t T_size_t;
+char *T_char_ptr;
+vector signed short T_vec_s16;
+vector signed char T_vec_s8;
+vector unsigned short T_vec_u16;
+vector unsigned int T_vec_u32;
+vector unsigned char T_vec_u8;
+vector float T_vec_f32;
+int T_int;
+float *T_float_ptr;
+void f(void);
+short *T_short_ptr;
+vector signed short *T_vec_s16_ptr;
+int *T_int_ptr;
+vector signed int *T_vec_s32_ptr;
+signed char *T_signed_char_ptr;
+vector signed char *T_vec_s8_ptr;
+unsigned short *T_unsigned_short_ptr;
+vector unsigned short *T_vec_u16_ptr;
+unsigned int *T_unsigned_int_ptr;
+vector unsigned int *T_vec_u32_ptr;
+unsigned char *T_unsigned_char_ptr;
+vector unsigned char *T_vec_u8_ptr;
+double T_double;
+int T_intb;
+vector bool short *T_vec_b16_ptr;
+vector bool int *T_vec_b32_ptr;
+vector bool char *T_vec_b8_ptr;
+vector float *T_vec_f32_ptr;
+vector pixel *T_vec_p16_ptr;
+vector bool short T_vec_b16;
+vector pixel T_vec_p16;
+vector bool int T_vec_b32;
+vector bool char T_vec_b8;
+float T_float;
+volatile void g(void);
+const char *T_const_char_ptr;
diff --git a/gcc/testsuite/gcc.dg/vmx/varargs-1.c b/gcc/testsuite/gcc.dg/vmx/varargs-1.c
new file mode 100644
index 0000000000..5961374648
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/varargs-1.c
@@ -0,0 +1,99 @@
+#include <altivec.h>
+#include <stdarg.h>
+#include <stdio.h>
+
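+/* Fetch vectors with va_arg after zero, one, and two named vector
+   parameters (f1-f3); f4 takes all of its vectors as named parameters.  */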
+typedef vector unsigned int T;
+
+extern void f1(int, ...);
+extern void f2(int, T, ...);
+extern void f3(int, T, T, ...);
+extern void f4(int, T, T, T);
+
+void printx(T a)
+{
+ union {
+ T v;
+ unsigned int a[4];
+ } u;
+ u.v = a;
+ printf("%d, %d, %d, %d\n", u.a[0], u.a[1], u.a[2], u.a[3]);
+}
+
+void f1(int a, ...)
+{
+ va_list ap;
+ va_start (ap, a);
+ while (a-- > 0)
+ printx(va_arg(ap, T));
+ va_end (ap);
+}
+
+void f2(int a, T b, ...)
+{
+ va_list ap;
+ printx(b);
+ a--;
+ va_start (ap, b);
+ while (a-- > 0)
+ printx(va_arg(ap, T));
+ va_end (ap);
+}
+
+void f3(int a, T b, T c, ...)
+{
+ va_list ap;
+ printx(b);
+ a--;
+ printx(c);
+ a--;
+ va_start (ap, c);
+ while (a-- > 0)
+ printx(va_arg(ap, T));
+ va_end (ap);
+}
+
+void f4(int a, T b, T c,
+ T d)
+{
+ printx(b);
+ a--;
+ printx(c);
+ a--;
+ printx(d);
+ a--;
+}
+
+int main()
+{
+ f4 (3,
+ ((T){1,1,1,1}),
+ ((T){2,2,2,2}),
+ ((T){3,3,3,3}));
+ f3 (3,
+ ((T){4,4,4,4}),
+ ((T){5,5,5,5}),
+ ((T){6,6,6,6}));
+ f2 (3,
+ ((T){7,7,7,7}),
+ ((T){8,8,8,8}),
+ ((T){9,9,9,9}));
+ f1 (3,
+ ((T){10,10,10,10}),
+ ((T){11,11,11,11}),
+ ((T){12,12,12,12}));
+ return 0;
+}
+
+/* { dg-output "1, 1, 1, 1(\n|\r\n|\r)" }
+ { dg-output "2, 2, 2, 2(\n|\r\n|\r)" }
+ { dg-output "3, 3, 3, 3(\n|\r\n|\r)" }
+ { dg-output "4, 4, 4, 4(\n|\r\n|\r)" }
+ { dg-output "5, 5, 5, 5(\n|\r\n|\r)" }
+ { dg-output "6, 6, 6, 6(\n|\r\n|\r)" }
+ { dg-output "7, 7, 7, 7(\n|\r\n|\r)" }
+ { dg-output "8, 8, 8, 8(\n|\r\n|\r)" }
+ { dg-output "9, 9, 9, 9(\n|\r\n|\r)" }
+ { dg-output "10, 10, 10, 10(\n|\r\n|\r)" }
+ { dg-output "11, 11, 11, 11(\n|\r\n|\r)" }
+ { dg-output "12, 12, 12, 12(\n|\r\n|\r)" }
+ */
diff --git a/gcc/testsuite/gcc.dg/vmx/varargs-2.c b/gcc/testsuite/gcc.dg/vmx/varargs-2.c
new file mode 100644
index 0000000000..1df1715290
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/varargs-2.c
@@ -0,0 +1,78 @@
+#include "harness.h"
+#include <stdarg.h>
+
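+/* Three named and twelve variadic vector float arguments plus a trailing
+   int: more vectors than the twelve AltiVec argument registers (v2-v13),
+   so the later ones must be fetched from the stack by va_arg.  */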
+static void
+varargsn003(vector float p1, vector float p2, vector float p3, ...)
+{
+ va_list ap;
+ vector float i1;
+ vector float i2;
+ vector float i3;
+ vector float i4;
+ vector float i5;
+ vector float i6;
+ vector float i7;
+ vector float i8;
+ vector float i9;
+ vector float i10;
+ vector float i11;
+ vector float i12;
+ vector float i13;
+ vector float i14;
+ vector float i15;
+ int i16;
+
+ va_start(ap, p3);
+ i1 = p1;
+ i2 = p2;
+ i3 = p3;
+ i4 = va_arg(ap, vector float);
+ i5 = va_arg(ap, vector float);
+ i6 = va_arg(ap, vector float);
+ i7 = va_arg(ap, vector float);
+ i8 = va_arg(ap, vector float);
+ i9 = va_arg(ap, vector float);
+ i10 = va_arg(ap, vector float);
+ i11 = va_arg(ap, vector float);
+ i12 = va_arg(ap, vector float);
+ i13 = va_arg(ap, vector float);
+ i14 = va_arg(ap, vector float);
+ i15 = va_arg(ap, vector float);
+ i16 = va_arg(ap, int);
+ va_end(ap);
+
+ check(vec_all_eq(i1, ((vector float){1.14e+09, 4.29e+08, -1.58e+09, 1.66e+09})), "i1");
+ check(vec_all_eq(i2, ((vector float){-1.83e+09, -6.79e+08, 1.58e+09, -3.38e+08})), "i2");
+ check(vec_all_eq(i3, ((vector float){-1.19e+09, -4.27e+08, 6.84e+08, 1.21e+08})), "i3");
+ check(vec_all_eq(i4, ((vector float){1.47e+09, 9.17e+08, 3.45e+08, -1.17e+08})), "i4");
+ check(vec_all_eq(i5, ((vector float){3.08e+08, 1.2e+08, 1.73e+09, 1.77e+09})), "i5");
+ check(vec_all_eq(i6, ((vector float){1.89e+09, 2.06e+09, 2.64e+08, 1.05e+09})), "i6");
+ check(vec_all_eq(i7, ((vector float){5.45e+08, 1.37e+09, -8.2e+08, 4.32e+07})), "i7");
+ check(vec_all_eq(i8, ((vector float){3.47e+08, -1.66e+09, 1.25e+09, 1.53e+09})), "i8");
+ check(vec_all_eq(i9, ((vector float){-6.04e+08, 1.48e+09, -1.48e+09, 1.92e+09})), "i9");
+ check(vec_all_eq(i10, ((vector float){-1.66e+09, -8.92e+08, -3.78e+08, 2.11e+09})), "i10");
+ check(vec_all_eq(i11, ((vector float){-7.46e+08, 4.01e+08, -1.78e+09, 1.83e+09})), "i11");
+ check(vec_all_eq(i12, ((vector float){1.83e+09, 5.73e+08, -2.96e+08, -7.46e+08})), "i12");
+ check(vec_all_eq(i13, ((vector float){-2.01e+09, 9.89e+08, -1.92e+09, 2.09e+09})), "i13");
+ check(vec_all_eq(i14, ((vector float){1.95e+09, -2.41e+08, 2.67e+08, 1.67e+09})), "i14");
+ check(vec_all_eq(i15, ((vector float){-2.12e+09, 8.18e+08, 9.47e+08, -1.25e+09})), "i15");
+ check(i16 == -947264420, "i16");
+}
+
+static void test()
+{
+ varargsn003(((vector float){1.14e+09, 4.29e+08, -1.58e+09, 1.66e+09}),
+ ((vector float){-1.83e+09, -6.79e+08, 1.58e+09, -3.38e+08}),
+ ((vector float){-1.19e+09, -4.27e+08, 6.84e+08, 1.21e+08}),
+ ((vector float){1.47e+09, 9.17e+08, 3.45e+08, -1.17e+08}),
+ ((vector float){3.08e+08, 1.2e+08, 1.73e+09, 1.77e+09}),
+ ((vector float){1.89e+09, 2.06e+09, 2.64e+08, 1.05e+09}),
+ ((vector float){5.45e+08, 1.37e+09, -8.2e+08, 4.32e+07}),
+ ((vector float){3.47e+08, -1.66e+09, 1.25e+09, 1.53e+09}),
+ ((vector float){-6.04e+08, 1.48e+09, -1.48e+09, 1.92e+09}),
+ ((vector float){-1.66e+09, -8.92e+08, -3.78e+08, 2.11e+09}),
+ ((vector float){-7.46e+08, 4.01e+08, -1.78e+09, 1.83e+09}),
+ ((vector float){1.83e+09, 5.73e+08, -2.96e+08, -7.46e+08}),
+ ((vector float){-2.01e+09, 9.89e+08, -1.92e+09, 2.09e+09}),
+ ((vector float){1.95e+09, -2.41e+08, 2.67e+08, 1.67e+09}),
+ ((vector float){-2.12e+09, 8.18e+08, 9.47e+08, -1.25e+09}),
+ -947264420);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/varargs-3.c b/gcc/testsuite/gcc.dg/vmx/varargs-3.c
new file mode 100644
index 0000000000..be8b71bf94
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/varargs-3.c
@@ -0,0 +1,75 @@
+#include "harness.h"
+#include <stdarg.h>
+
+typedef struct n_a
+{
+ signed char m1;
+ short m2;
+ int m3;
+ double m4;
+ vector float m5;
+}
+n_a;
+
+void
+varlistn_a(signed char p1, va_list ap)
+{
+ n_a q;
+ q.m1 = p1;
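+ /* m2 is declared short but arrives as int under the default argument promotions.  */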
+ q.m2 = va_arg(ap, int);
+ q.m3 = va_arg(ap, int);
+ q.m4 = va_arg(ap, double);
+ q.m5 = va_arg(ap, vector float);
+
+ check(q.m1 == 77, "q.m1");
+ check(q.m2 == 1924, "q.m2");
+ check(q.m3 == -1471601920, "q.m3");
+ check(q.m4 == 3.65e+18, "q.m4");
+ check(vec_all_eq(q.m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})), "q.m5");
+}
+
+void
+varargsn_a(signed char p1, ...)
+{
+ n_a r, s;
+ va_list ap;
+
+ va_start(ap, p1);
+ r.m1 = p1;
+ r.m2 = va_arg(ap, int);
+ r.m3 = va_arg(ap, int);
+ r.m4 = va_arg(ap, double);
+ r.m5 = va_arg(ap, vector float);
+ va_end(ap);
+
+ check(r.m1 == 77, "r.m1");
+ check(r.m2 == 1924, "r.m2");
+ check(r.m3 == -1471601920, "r.m3");
+ check(r.m4 == 3.65e+18, "r.m4");
+ check(vec_all_eq(r.m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})), "r.m5");
+
+ va_start(ap, p1);
+ s.m1 = p1;
+ s.m2 = va_arg(ap, int);
+ s.m3 = va_arg(ap, int);
+ s.m4 = va_arg(ap, double);
+ s.m5 = va_arg(ap, vector float);
+ va_end(ap);
+
+ check(s.m1 == 77, "s.m1");
+ check(s.m2 == 1924, "s.m2");
+ check(s.m3 == -1471601920, "s.m3");
+ check(s.m4 == 3.65e+18, "s.m4");
+ check(vec_all_eq(s.m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})), "s.m5");
+
+ va_start(ap, p1);
+ varlistn_a(p1, ap);
+ va_end(ap);
+}
+
+
+
+void test()
+{
+ varargsn_a(77, 1924, -1471601920, 3.65e+18, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08}));
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/varargs-4.c b/gcc/testsuite/gcc.dg/vmx/varargs-4.c
new file mode 100644
index 0000000000..a175519532
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/varargs-4.c
@@ -0,0 +1,291 @@
+#include "harness.h"
+#include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+
+typedef struct n_a
+{
+ signed char m1;
+ short m2;
+ int m3;
+ double m4;
+ vector float m5;
+}
+n_a;
+
+static n_a gn_a;
+
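+/* Verify that within the N-byte object at P every bit in bytes
+   [START, END) is clear and every other bit is set; return the bit
+   index of the first mismatch, or N*8 when the layout is as expected.  */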
+static int
+lay(char *p, int start, int end, int n)
+{
+ int b;
+ unsigned char ch;
+ unsigned int mask;
+
+ start *= 8;
+ end *= 8;
+ n *= 8;
+
+ for (b = 0; b + 8 <= start; b += 8)
+ {
+ ch = *p++;
+ if (ch != 0xff)
+ for (mask = 0x80; mask; b++, mask >>= 1)
+ if ((ch & mask) != mask)
+ return b;
+ }
+
+ if (b < start)
+ {
+ ch = *p++;
+ for (mask = 0x80; b < start; b++, mask >>= 1)
+ if ((ch & mask) != mask)
+ return b;
+ for (; mask && b < end; b++, mask >>= 1)
+ if ((ch & mask) != 0)
+ return b;
+ }
+
+ for (; b + 8 <= end; b += 8)
+ {
+ ch = *p++;
+ if (ch != 0)
+ for (mask = 0x80; mask; b++, mask >>= 1)
+ if ((ch & mask) != 0)
+ return b;
+ }
+
+ if (b < end)
+ {
+ ch = *p++;
+ for (mask = 0x80; b < end; b++, mask >>= 1)
+ if ((ch & mask) != 0)
+ return b;
+ for (; mask && b < n; b++, mask >>= 1)
+ if ((ch & mask) != mask)
+ return b;
+ }
+
+ for (; b + 8 <= n; b += 8)
+ {
+ ch = *p++;
+ if (ch != 0xff)
+ for (mask = 0x80; mask; b++, mask >>= 1)
+ if ((ch & mask) != mask)
+ return b;
+ }
+
+ return n;
+}
+
+static void
+initn_a(signed char p1, short p2, int p3, double p4, vector float p5)
+{
+ n_a i;
+
+ i.m1 = p1;
+ i.m2 = p2;
+ i.m3 = p3;
+ i.m4 = p4;
+ i.m5 = p5;
+
+ check(i.m1 == 77, "i.m1");
+ check(i.m2 == 1924, "i.m2");
+ check(i.m3 == -1471601920, "i.m3");
+ check(vec_all_eq(i.m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})),
+ "i.m5");
+
+ check(sizeof(n_a) == 32, "sizeof(n_a)");
+
+ check(offsetof(n_a, m1) == 0, "offsetof(m1)");
+ check(offsetof(n_a, m2) == 2, "offsetof(m2)");
+ check(offsetof(n_a, m3) == 4, "offsetof(m3)");
+ check(offsetof(n_a, m4) == 8, "offsetof(m4)");
+ check(offsetof(n_a, m5) == 16, "offsetof(m5)");
+
+ check(sizeof(i.m1) == 1, "sizeof(m1)");
+ check(sizeof(i.m2) == 2, "sizeof(m2)");
+ check(sizeof(i.m3) == 4, "sizeof(m3)");
+ check(sizeof(i.m4) == 8, "sizeof(m4)");
+ check(sizeof(i.m5) == 16, "sizeof(m5)");
+
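+/* Check that assigning to FIELD touches exactly its own bytes: set the
+   whole struct to all-ones, zero FIELD, then verify the resulting bit
+   pattern with lay().  */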
+#define lay_check(field) do { \
+ memset((char *)&i, 0xFF, sizeof(i)); \
+ lay_reset(field); \
+ check(lay((char *)&i, \
+ offsetof(n_a, field), \
+ offsetof(n_a, field) + sizeof(i.field), \
+ sizeof(i)) == sizeof(i)*8, \
+ "lay(" #field ")"); \
+ } while (0)
+#define lay_reset(field) i.field = 0
+
+ lay_check(m1);
+ lay_check(m2);
+ lay_check(m3);
+ lay_check(m4);
+#undef lay_reset
+#define lay_reset(field) i.field = ((vector float){0,0,0,0})
+ lay_check(m5);
+
+#undef lay_check
+#undef lay_reset
+}
+
+n_a
+valuen_a(void)
+{
+ return gn_a;
+}
+
+n_a *
+addrn_a(void)
+{
+ return &gn_a;
+}
+
+static void
+eqn_a(n_a * a)
+{
+ check(a->m1 == 77, "a->m1");
+ check(a->m2 == 1924, "a->m2");
+ check(a->m3 == -1471601920, "a->m3");
+ check(vec_all_eq(a->m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})),
+ "a->m5");
+}
+
+static void
+getsn_a(n_a * a)
+{
+ a->m1 = 77;
+ a->m2 = 1924;
+ a->m3 = -1471601920;
+ a->m4 = 3.65e+18;
+ a->m5 = ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08});
+}
+
+static void
+varlistn_a(signed char p1, va_list ap)
+{
+ n_a q;
+ q.m1 = p1;
+ q.m2 = va_arg(ap, int);
+ q.m3 = va_arg(ap, int);
+ q.m4 = va_arg(ap, double);
+ q.m5 = va_arg(ap, vector float);
+
+ check(q.m1 == 77, "q.m1");
+ check(q.m2 == 1924, "q.m2");
+ check(q.m3 == -1471601920, "q.m3");
+ check(vec_all_eq(q.m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})),
+ "q.m5");
+}
+
+static void
+varargsn_a(signed char p1, ...)
+{
+ n_a q, r;
+ va_list ap;
+
+ va_start(ap, p1);
+ q.m1 = p1;
+ q.m2 = va_arg(ap, int);
+ q.m3 = va_arg(ap, int);
+ q.m4 = va_arg(ap, double);
+ q.m5 = va_arg(ap, vector float);
+ va_end(ap);
+
+ check(q.m1 == 77, "q.m1");
+ check(q.m2 == 1924, "q.m2");
+ check(q.m3 == -1471601920, "q.m3");
+ check(vec_all_eq(q.m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})),
+ "q.m5");
+
+ va_start(ap, p1);
+ r.m1 = p1;
+ r.m2 = va_arg(ap, int);
+ r.m3 = va_arg(ap, int);
+ r.m4 = va_arg(ap, double);
+ r.m5 = va_arg(ap, vector float);
+ va_end(ap);
+
+ check(r.m1 == 77, "r.m1");
+ check(r.m2 == 1924, "r.m2");
+ check(r.m3 == -1471601920, "r.m3");
+ check(vec_all_eq(r.m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})),
+ "r.m5");
+
+ va_start(ap, p1);
+ varlistn_a(p1, ap);
+ va_end(ap);
+}
+
+static void
+test()
+{
+ static struct
+ {
+ char a;
+ n_a b;
+ }
+ s;
+ n_a v[3], a, *p;
+
+ static n_a i = { 77, 1924, -1471601920, 3.65e+18, {-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08} };
+
+ memset((char *)&(v), -1, sizeof(v));
+ v[1] = s.b;
+ check(lay((char *)&v, sizeof(n_a), sizeof(n_a)*2, sizeof(n_a)*3) == sizeof(n_a)*3*8,
+ "structure assignment");
+
+ check(i.m1 == 77, "i.m1");
+ check(i.m2 == 1924, "i.m2");
+ check(i.m3 == -1471601920, "i.m3");
+ check(vec_all_eq(i.m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})),
+ "i.m5");
+
+ initn_a(77, 1924, -1471601920, 3.65e+18, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08}));
+ varargsn_a(77, 1924, -1471601920, 3.65e+18, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08}));
+
+ gn_a.m1 = 77;
+ gn_a.m2 = 1924;
+ gn_a.m3 = -1471601920;
+ gn_a.m4 = 3.65e+18;
+ gn_a.m5 = ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08});
+ a = valuen_a();
+
+ check(a.m1 == 77, "a.m1");
+ check(a.m2 == 1924, "a.m2");
+ check(a.m3 == -1471601920, "a.m3");
+ check(vec_all_eq(a.m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})),
+ "a.m5");
+
+ p = addrn_a();
+
+ check(p->m1 == 77, "p->m1");
+ check(p->m2 == 1924, "p->m2");
+ check(p->m3 == -1471601920, "p->m3");
+ check(vec_all_eq(p->m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})),
+ "p->m5");
+
+ eqn_a(&a);
+
+ check(gn_a.m1 == 77, "gn_a.m1");
+ check(gn_a.m2 == 1924, "gn_a.m2");
+ check(gn_a.m3 == -1471601920, "gn_a.m3");
+ check(vec_all_eq(gn_a.m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})),
+ "gn_a.m5");
+
+ getsn_a(&v[0]);
+ v[2].m1 = v[0].m1;
+ v[2].m2 = v[0].m2;
+ v[2].m3 = v[0].m3;
+ v[2].m4 = v[0].m4;
+ v[2].m5 = v[0].m5;
+
+ check(v[2].m1 == 77, "v[2].m1");
+ check(v[2].m2 == 1924, "v[2].m2");
+ check(v[2].m3 == -1471601920, "v[2].m3");
+ check(vec_all_eq(v[2].m5, ((vector float){-1.38e+09, 5.96e+08, 6.88e+08, -3.2e+08})),
+ "v[2].m5");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/varargs-5.c b/gcc/testsuite/gcc.dg/vmx/varargs-5.c
new file mode 100644
index 0000000000..ed551c2dfa
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/varargs-5.c
@@ -0,0 +1,71 @@
+#include "harness.h"
+#include <stdarg.h>
+
+typedef struct n025
+{
+ int m1;
+ double m2;
+ int m3;
+ vector signed int m4;
+}
+n025;
+
+static void
+varlistn025(int p1, double p2, va_list ap)
+{
+ n025 q;
+ q.m1 = p1;
+ q.m2 = p2;
+ q.m3 = va_arg(ap, int);
+ q.m4 = va_arg(ap, vector signed int);
+
+ check(q.m1 == 1363477585, "q.m1");
+ check(q.m2 == -8.72e+18, "q.m2");
+ check(q.m3 == 198652649, "q.m3");
+ check(vec_all_eq(q.m4, ((vector signed int){323001541, -1353029458, 1756879633, -327031280})),
+ "q.m5");
+}
+
+
+void
+varargsn025(int p1, double p2, ...)
+{
+ n025 r, s;
+ va_list ap;
+
+ va_start(ap, p2);
+ r.m1 = p1;
+ r.m2 = p2;
+ r.m3 = va_arg(ap, int);
+ r.m4 = va_arg(ap, vector signed int);
+ va_end(ap);
+
+ check(r.m1 == 1363477585, "r.m1");
+ check(r.m2 == -8.72e+18, "r.m2");
+ check(r.m3 == 198652649, "r.m3");
+ check(vec_all_eq(r.m4, ((vector signed int){323001541, -1353029458, 1756879633, -327031280})),
+ "r.m5");
+
+ va_start(ap, p2);
+ s.m1 = p1;
+ s.m2 = p2;
+ s.m3 = va_arg(ap, int);
+ s.m4 = va_arg(ap, vector signed int);
+ va_end(ap);
+
+ check(s.m1 == 1363477585, "s.m1");
+ check(s.m2 == -8.72e+18, "s.m2");
+ check(s.m3 == 198652649, "s.m3");
+ check(vec_all_eq(s.m4, ((vector signed int){323001541, -1353029458, 1756879633, -327031280})),
+ "s.m5");
+
+ va_start(ap, p2);
+ varlistn025(p1, p2, ap);
+ va_end(ap);
+}
+
+static void test()
+{
+ varargsn025(1363477585, -8.72e+18, 198652649,
+ ((vector signed int){323001541, -1353029458, 1756879633, -327031280}));
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/varargs-6.c b/gcc/testsuite/gcc.dg/vmx/varargs-6.c
new file mode 100644
index 0000000000..b1f436fd04
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/varargs-6.c
@@ -0,0 +1,35 @@
+#include "harness.h"
+#include <stdarg.h>
+
+typedef struct n025
+{
+ int m1;
+ double m2;
+ int m3;
+ vector signed int m4;
+}
+n025;
+
+static void
+varargsn025(int p1, double p2, ...)
+{
+ n025 q;
+ va_list ap;
+ va_start(ap, p2);
+ q.m1 = p1;
+ q.m2 = p2;
+ q.m3 = va_arg(ap, int);
+ q.m4 = va_arg(ap, vector signed int);
+ va_end(ap);
+
+ check(q.m1 == 1363477585, "q.m1");
+ check(q.m2 == -8.72e+18, "q.m2");
+ check(q.m3 == 198652649, "q.m3");
+ check(vec_all_eq(q.m4, ((vector signed int){323001541, -1353029458, 1756879633, -327031280})),
+ "q.m4");
+}
+
+static void test()
+{
+ varargsn025(1363477585, -8.72e+18, 198652649, ((vector signed int){323001541, -1353029458, 1756879633, -327031280}));
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/varargs-7.c b/gcc/testsuite/gcc.dg/vmx/varargs-7.c
new file mode 100644
index 0000000000..2a09d0ca08
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/varargs-7.c
@@ -0,0 +1,87 @@
+#include "harness.h"
+#include <stdarg.h>
+
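+/* Thirteen fixed vector arguments followed by a vector and an int
+   vararg: more vectors than the ABI can pass in registers, so the
+   trailing arguments must arrive in memory.  */
+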
+static void
+varargsn001(vector unsigned int p1, vector unsigned int p2,
+ vector unsigned int p3, vector unsigned int p4,
+ vector unsigned int p5, vector unsigned int p6,
+ vector unsigned int p7, vector unsigned int p8,
+ vector unsigned int p9, vector unsigned int p10,
+ vector unsigned int p11, vector unsigned int p12,
+ vector unsigned int p13, ...)
+{
+ va_list ap;
+ vector unsigned int i1;
+ vector unsigned int i2;
+ vector unsigned int i3;
+ vector unsigned int i4;
+ vector unsigned int i5;
+ vector unsigned int i6;
+ vector unsigned int i7;
+ vector unsigned int i8;
+ vector unsigned int i9;
+ vector unsigned int i10;
+ vector unsigned int i11;
+ vector unsigned int i12;
+ vector unsigned int i13;
+ vector unsigned int i14;
+ int i15;
+
+ va_start(ap, p13);
+
+ i1 = p1;
+ i2 = p2;
+ i3 = p3;
+ i4 = p4;
+ i5 = p5;
+ i6 = p6;
+ i7 = p7;
+ i8 = p8;
+ i9 = p9;
+ i10 = p10;
+ i11 = p11;
+ i12 = p12;
+ i13 = p13;
+ i14 = va_arg(ap, vector unsigned int);
+ i15 = va_arg(ap, int);
+ va_end(ap);
+
+ check(vec_all_eq(i1, ((vector unsigned int){1,1,1,1})), "i1");
+ check(vec_all_eq(i2, ((vector unsigned int){2,2,2,2})), "i2");
+ check(vec_all_eq(i3, ((vector unsigned int){3,3,3,3})), "i3");
+ check(vec_all_eq(i4, ((vector unsigned int){4,4,4,4})), "i4");
+ check(vec_all_eq(i5, ((vector unsigned int){5,5,5,5})), "i5");
+ check(vec_all_eq(i6, ((vector unsigned int){6,6,6,6})), "i6");
+ check(vec_all_eq(i7, ((vector unsigned int){7,7,7,7})), "i7");
+ check(vec_all_eq(i8, ((vector unsigned int){8,8,8,8})), "i8");
+ check(vec_all_eq(i9, ((vector unsigned int){9,9,9,9})), "i9");
+ check(vec_all_eq(i10, ((vector unsigned int){10,10,10,10})), "i10");
+ check(vec_all_eq(i11, ((vector unsigned int){11,11,11,11})), "i11");
+ check(vec_all_eq(i12, ((vector unsigned int){12,12,12,12})), "i12");
+ check(vec_all_eq(i13, ((vector unsigned int){13,13,13,13})), "i13");
+ check(vec_all_eq(i14, ((vector unsigned int){14,14,14,14})), "i14");
+ check(i15 == 15, "i15");
+}
+
+static void test()
+{
+ varargsn001(((vector unsigned int){1,1,1,1}),
+ ((vector unsigned int){2,2,2,2}),
+ ((vector unsigned int){3,3,3,3}),
+ ((vector unsigned int){4,4,4,4}),
+ ((vector unsigned int){5,5,5,5}),
+ ((vector unsigned int){6,6,6,6}),
+ ((vector unsigned int){7,7,7,7}),
+ ((vector unsigned int){8,8,8,8}),
+ ((vector unsigned int){9,9,9,9}),
+ ((vector unsigned int){10,10,10,10}),
+ ((vector unsigned int){11,11,11,11}),
+ ((vector unsigned int){12,12,12,12}),
+ ((vector unsigned int){13,13,13,13}),
+ ((vector unsigned int){14,14,14,14}),
+ 15);
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/vmx.exp b/gcc/testsuite/gcc.dg/vmx/vmx.exp
new file mode 100644
index 0000000000..85c88d8a39
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/vmx.exp
@@ -0,0 +1,57 @@
+# Copyright (C) 2004, 2006, 2007 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# Skip these tests for non-PowerPC targets and for targets where AltiVec
+# is not supported.
+if {![istarget powerpc*-*-*]
+ || ![check_effective_target_powerpc_altivec_ok] } {
+ return
+}
+
+# If a testcase doesn't have special options, use these.
+# -pedantic-errors is inappropriate here, as this subdirectory tests
+# nothing but extensions.
+global DEFAULT_VMXCFLAGS
+if ![info exists DEFAULT_VMXCFLAGS] then {
+ set DEFAULT_VMXCFLAGS "-maltivec -mabi=altivec -std=gnu99 -mno-vsx"
+}
+
+# If the target system supports AltiVec instructions, the default action
+# for a test is 'run', otherwise it's 'compile'.
+global dg-do-what-default
+set save-dg-do-what-default ${dg-do-what-default}
+if { [check_vmx_hw_available] } {
+ set dg-do-what-default run
+} else {
+ set dg-do-what-default compile
+}
+
+# Initialize `dg'.
+dg-init
+
+# Main loop.
+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.c]] \
+ $DEFAULT_VMXCFLAGS
+
+# All done.
+dg-finish
+
+set dg-do-what-default ${save-dg-do-what-default}
diff --git a/gcc/testsuite/gcc.dg/vmx/x-01.c b/gcc/testsuite/gcc.dg/vmx/x-01.c
new file mode 100644
index 0000000000..324e83e35a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/x-01.c
@@ -0,0 +1,29 @@
+#include <altivec.h>
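+
+/* Take the address of a vector parameter and store another function's
+   result through the pointer.  */
+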
+vector bool char
+g(vector unsigned char, vector bool char);
+
+vector bool char
+f(vector bool char b, vector unsigned char d)
+{
+ vector bool char *p = &b;
+ *p = g(d,b);
+ return *p;
+}
+
+vector bool char b8;
+vector unsigned char u8;
+vector bool char
+g(vector unsigned char a, vector bool char b)
+{
+ return b8;
+}
+
+int main()
+{
+ f(b8, u8);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/x-02.c b/gcc/testsuite/gcc.dg/vmx/x-02.c
new file mode 100644
index 0000000000..4ddcc0c00d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/x-02.c
@@ -0,0 +1,34 @@
+#include <altivec.h>
+
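+/* Like x-01.c, but with internal linkage throughout and a conditional
+   read back through the pointer.  */
+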
+static vector bool char
+g(vector unsigned char, vector bool char);
+
+static int q(void);
+
+static vector bool char
+f(vector bool char b, vector unsigned char d)
+{
+ vector bool char *p = &b;
+ *p = g(d,b);
+ return q() ? *p : b;
+}
+
+static vector bool char b8;
+static vector unsigned char u8;
+
+static vector bool char
+g(vector unsigned char a, vector bool char b)
+{
+ return b8;
+}
+
+static int q(void)
+{
+ return 1;
+}
+
+int main()
+{
+ f(b8, u8);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/x-03.c b/gcc/testsuite/gcc.dg/vmx/x-03.c
new file mode 100644
index 0000000000..0972ac9a3d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/x-03.c
@@ -0,0 +1,128 @@
+#include <altivec.h>
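+
+/* Return every AltiVec vector type from a function, pass each as a
+   named argument, and forward them all through the variadic g().  */
+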
+extern vector unsigned char u8(void);
+extern vector signed char s8(void);
+extern vector bool char b8(void);
+extern vector unsigned short u16(void);
+extern vector signed short s16(void);
+extern vector bool short b16(void);
+extern vector unsigned int u32(void);
+extern vector signed int s32(void);
+extern vector bool int b32(void);
+extern vector float f32(void);
+extern vector pixel p16(void);
+
+extern void g(vector unsigned char, ...);
+
+void
+f(vector unsigned char u8p, vector signed char s8p, vector bool char b8p,
+ vector unsigned short u16p, vector signed short s16p,
+ vector bool short b16p, vector unsigned int u32p,
+ vector signed int s32p, vector bool int b32p,
+ vector float f32p, vector pixel p16p)
+{
+ vector unsigned char u8l = u8();
+ vector signed char s8l = s8();
+ vector bool char b8l = b8();
+ vector unsigned short u16l = u16();
+ vector signed short s16l = s16();
+ vector bool short b16l = b16();
+ vector unsigned int u32l = u32();
+ vector signed int s32l = s32();
+ vector bool int b32l = b32();
+ vector float f32l = f32();
+ vector pixel p16l = p16();
+
+ g(u8l, s8l, b8l, u16l, s16l, b16l, u32l, s32l, b32l, f32l, p16l);
+}
+
+vector unsigned char
+u8(void)
+{
+ static vector unsigned char zero;
+ return zero;
+}
+
+vector signed char
+s8(void)
+{
+ static vector signed char zero;
+ return zero;
+}
+
+vector bool char
+b8(void)
+{
+ static vector bool char zero;
+ return zero;
+}
+
+vector unsigned short
+u16(void)
+{
+ static vector unsigned short zero;
+ return zero;
+}
+
+vector signed short
+s16(void)
+{
+ static vector signed short zero;
+ return zero;
+}
+
+vector bool short
+b16(void)
+{
+ static vector bool short zero;
+ return zero;
+}
+
+vector unsigned int
+u32(void)
+{
+ static vector unsigned int zero;
+ return zero;
+}
+
+vector signed int
+s32(void)
+{
+ static vector signed int zero;
+ return zero;
+}
+
+vector bool int
+b32(void)
+{
+ static vector bool int zero;
+ return zero;
+}
+
+vector float
+f32(void)
+{
+ static vector float zero;
+ return zero;
+}
+
+vector pixel
+p16(void)
+{
+ static vector pixel zero;
+ return zero;
+}
+
+void
+g(vector unsigned char a, ...)
+{
+}
+
+int main()
+{
+ f(u8(), s8(), b8(), u16(), s16(), b16(), u32(), s32(), b32(), f32(), p16());
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/x-04.c b/gcc/testsuite/gcc.dg/vmx/x-04.c
new file mode 100644
index 0000000000..44694c8a6d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/x-04.c
@@ -0,0 +1,80 @@
+#include <altivec.h>
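+
+/* Bit-reverse the 128-bit input: the control vectors reverse the byte
+   order, then each rotate/select pair moves one bit of every byte into
+   its mirrored position within the byte.  */
+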
+vector unsigned char
+permute_128(vector unsigned char input)
+{
+ vector unsigned char result, new_bit;
+
+ vector unsigned char select2 = ((vector unsigned char){2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2});
+ vector unsigned char select3 = ((vector unsigned char){4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4});
+ vector unsigned char select4 = ((vector unsigned char){8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8});
+ vector unsigned char select5 = ((vector unsigned char){16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16});
+ vector unsigned char select6 = ((vector unsigned char){32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32});
+ vector unsigned char select7 = ((vector unsigned char){64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64});
+ vector unsigned char select8 = ((vector unsigned char){128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128});
+
+ vector unsigned char control1
+ = ((vector unsigned char){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0});
+ vector unsigned char control2
+ = ((vector unsigned char){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0});
+ vector unsigned char control3
+ = ((vector unsigned char){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0});
+ vector unsigned char control4
+ = ((vector unsigned char){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0});
+ vector unsigned char control5
+ = ((vector unsigned char){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0});
+ vector unsigned char control6
+ = ((vector unsigned char){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0});
+ vector unsigned char control7
+ = ((vector unsigned char){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0});
+ vector unsigned char control8
+ = ((vector unsigned char){15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0});
+ vector unsigned char rotate1 = ((vector unsigned char){1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
+ vector unsigned char rotate2 = ((vector unsigned char){3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3});
+ vector unsigned char rotate3 = ((vector unsigned char){5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5});
+ vector unsigned char rotate4 = ((vector unsigned char){7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7});
+ vector unsigned char rotate5 = ((vector unsigned char){1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
+ vector unsigned char rotate6 = ((vector unsigned char){3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3});
+ vector unsigned char rotate7 = ((vector unsigned char){5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5});
+ vector unsigned char rotate8 = ((vector unsigned char){7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7});
+
+ result = vec_vperm(input, input, control1);
+ result = vec_rl(result, rotate1);
+
+ new_bit = vec_vperm(input, input, control2);
+ new_bit = vec_rl(new_bit, rotate2);
+ result = vec_sel(result, new_bit, select2);
+
+ new_bit = vec_vperm(input, input, control3);
+ new_bit = vec_rl(new_bit, rotate3);
+ result = vec_sel(result, new_bit, select3);
+
+ new_bit = vec_vperm(input, input, control4);
+ new_bit = vec_rl(new_bit, rotate4);
+ result = vec_sel(result, new_bit, select4);
+
+ new_bit = vec_vperm(input, input, control5);
+ new_bit = vec_rl(new_bit, rotate5);
+ result = vec_sel(result, new_bit, select5);
+
+ new_bit = vec_vperm(input, input, control6);
+ new_bit = vec_rl(new_bit, rotate6);
+ result = vec_sel(result, new_bit, select6);
+
+ new_bit = vec_vperm(input, input, control7);
+ new_bit = vec_rl(new_bit, rotate7);
+ result = vec_sel(result, new_bit, select7);
+
+ new_bit = vec_vperm(input, input, control8);
+ new_bit = vec_rl(new_bit, rotate8);
+ result = vec_sel(result, new_bit, select8);
+
+ return result;
+}
+
+int main()
+{
+ vector unsigned char input
+ = ((vector unsigned char){0,1,2,4,8,16,32,64,128,0,1,2,4,8,16,32});
+ vector unsigned char result = permute_128(input);
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/x-05.c b/gcc/testsuite/gcc.dg/vmx/x-05.c
new file mode 100644
index 0000000000..80c13dcf04
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/x-05.c
@@ -0,0 +1,82 @@
+#include <altivec.h>
+
+static vector unsigned char select2 = {2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2};
+static vector unsigned char select3 = {4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4};
+static vector unsigned char select4 = {8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8};
+static vector unsigned char select5 = {16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16};
+static vector unsigned char select6 = {32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32};
+static vector unsigned char select7 = {64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64};
+static vector unsigned char select8 = {128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128};
+
+static vector unsigned char control1
+ = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+static vector unsigned char control2
+ = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+static vector unsigned char control3
+ = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+static vector unsigned char control4
+ = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+static vector unsigned char control5
+ = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+static vector unsigned char control6
+ = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+static vector unsigned char control7
+ = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+static vector unsigned char control8
+ = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
+static vector unsigned char rotate1 = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
+static vector unsigned char rotate2 = {3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3};
+static vector unsigned char rotate3 = {5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5};
+static vector unsigned char rotate4 = {7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7};
+static vector unsigned char rotate5 = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
+static vector unsigned char rotate6 = {3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3};
+static vector unsigned char rotate7 = {5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5};
+static vector unsigned char rotate8 = {7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7};
+
+static vector unsigned char permute_128(vector unsigned char input)
+{
+ vector unsigned char result, new_bit;
+
+ /* Reverse the byte order, then mirror the bits within each byte,
+    one select mask at a time.  */
+ result = vec_vperm(input, input, control1);
+ result = vec_rl(result, rotate1);
+
+ new_bit = vec_vperm(input, input, control2);
+ new_bit = vec_rl(new_bit, rotate2);
+ result = vec_sel(result, new_bit, select2);
+
+ new_bit = vec_vperm(input, input, control3);
+ new_bit = vec_rl(new_bit, rotate3);
+ result = vec_sel(result, new_bit, select3);
+
+ new_bit = vec_vperm(input, input, control4);
+ new_bit = vec_rl(new_bit, rotate4);
+ result = vec_sel(result, new_bit, select4);
+
+ new_bit = vec_vperm(input, input, control5);
+ new_bit = vec_rl(new_bit, rotate5);
+ result = vec_sel(result, new_bit, select5);
+
+ new_bit = vec_vperm(input, input, control6);
+ new_bit = vec_rl(new_bit, rotate6);
+ result = vec_sel(result, new_bit, select6);
+
+ new_bit = vec_vperm(input, input, control7);
+ new_bit = vec_rl(new_bit, rotate7);
+ result = vec_sel(result, new_bit, select7);
+
+ new_bit = vec_vperm(input, input, control8);
+ new_bit = vec_rl(new_bit, rotate8);
+ result = vec_sel(result, new_bit, select8);
+
+ return result;
+}
+
+int main()
+{
+ vector unsigned char input
+ = {0,1,2,4,8,16,32,64,128,0,1,2,4,8,16,32};
+ vector unsigned char result = permute_128(input);
+ return 0;
+}
+
diff --git a/gcc/testsuite/gcc.dg/vmx/yousufi-1.c b/gcc/testsuite/gcc.dg/vmx/yousufi-1.c
new file mode 100644
index 0000000000..eed4be3d1f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/yousufi-1.c
@@ -0,0 +1,18 @@
+#include "harness.h"
+
+/* Tests the vec_ctu function, which converts a vector of floats to a vector
+ of unsigned ints. In powerpc-eabisim-run ver. moto-1.0, vec_ctu produces
+ strange output for input values of less than ~.0039. -Umair */
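+
+/* vec_ctu(a,b) converts each element of a, scaled by 2**b, to an
+   unsigned int with truncation, so trunc(0.003 * 2) is 0 here.  */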
+
+static void test()
+{
+ vector float input = ((vector float){0.003,0.003,0.003,0.003});
+ vector unsigned int output;
+ vector unsigned int expect = ((vector unsigned int){0,0,0,0});
+
+ output = vec_ctu(input, 1);
+ check(vec_all_eq(output, expect), "vec_ctu");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/zero-1.c b/gcc/testsuite/gcc.dg/vmx/zero-1.c
new file mode 100644
index 0000000000..637351cc5d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/zero-1.c
@@ -0,0 +1,16 @@
+#include "harness.h"
+
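+/* (a+a) & ~(a+a) is identically zero regardless of what the
+   uninitialized a contains.  */
+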
+static vector unsigned int funny()
+{
+ vector unsigned int a;
+ return vec_andc(vec_add(a,a),vec_add(a,a));
+}
+
+static void test()
+{
+ static vector unsigned int zero;
+ check(vec_all_eq(funny(), zero), "funny");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/zero.c b/gcc/testsuite/gcc.dg/vmx/zero.c
new file mode 100644
index 0000000000..9a337e6e85
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/zero.c
@@ -0,0 +1,100 @@
+#include "harness.h"
+
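+/* Every function below must return a zero vector: either a literal
+   zero, or an expression (a-a, a^a, a&~a) that is zero no matter what
+   the uninitialized operand a holds.  */
+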
+vector signed short zs16() { return ((vector signed short){0,0,0,0,0,0,0,0}); }
+vector signed short s16ss() { vector signed short a; return vec_subs(a,a); }
+vector signed short s16s() { vector signed short a; return vec_sub(a,a); }
+vector signed short s16x() { vector signed short a; return vec_xor(a,a); }
+vector signed short s16a() { vector signed short a; return vec_andc(a,a); }
+
+vector unsigned short zu16() { return ((vector unsigned short){0,0,0,0,0,0,0,0}); }
+vector unsigned short u16ss() { vector unsigned short a; return vec_subs(a,a); }
+vector unsigned short u16s() { vector unsigned short a; return vec_sub(a,a); }
+vector unsigned short u16x() { vector unsigned short a; return vec_xor(a,a); }
+vector unsigned short u16a() { vector unsigned short a; return vec_andc(a,a); }
+
+vector signed int zs32() { return ((vector signed int){0,0,0,0}); }
+vector signed int s32ss() { vector signed int a; return vec_subs(a,a); }
+vector signed int s32s() { vector signed int a; return vec_sub(a,a); }
+vector signed int s32x() { vector signed int a; return vec_xor(a,a); }
+vector signed int s32a() { vector signed int a; return vec_andc(a,a); }
+
+vector unsigned int zu32() { return ((vector unsigned int){0,0,0,0}); }
+vector unsigned int u32ss() { vector unsigned int a; return vec_subs(a,a); }
+vector unsigned int u32s() { vector unsigned int a; return vec_sub(a,a); }
+vector unsigned int u32x() { vector unsigned int a; return vec_xor(a,a); }
+vector unsigned int u32a() { vector unsigned int a; return vec_andc(a,a); }
+
+vector signed char zs8() { return ((vector signed char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}); }
+vector signed char s8ss() { vector signed char a; return vec_subs(a,a); }
+vector signed char s8s() { vector signed char a; return vec_sub(a,a); }
+vector signed char s8x() { vector signed char a; return vec_xor(a,a); }
+vector signed char s8a() { vector signed char a; return vec_andc(a,a); }
+
+vector unsigned char zu8() { return ((vector unsigned char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}); }
+vector unsigned char u8ss() { vector unsigned char a; return vec_subs(a,a); }
+vector unsigned char u8s() { vector unsigned char a; return vec_sub(a,a); }
+vector unsigned char u8x() { vector unsigned char a; return vec_xor(a,a); }
+vector unsigned char u8a() { vector unsigned char a; return vec_andc(a,a); }
+
+vector pixel zp16() { return ((vector pixel){0,0,0,0,0,0,0,0}); }
+
+vector bool short zb16() { return ((vector bool short){0,0,0,0,0,0,0,0}); }
+
+vector bool short b16x() { vector bool short a; return vec_xor(a,a); }
+vector bool short b16a() { vector bool short a; return vec_andc(a,a); }
+vector bool int zb32() { return ((vector bool int){0,0,0,0}); }
+
+vector bool int b32x() { vector bool int a; return vec_xor(a,a); }
+vector bool int b32a() { vector bool int a; return vec_andc(a,a); }
+vector bool char zb8() { return ((vector bool char){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}); }
+
+vector bool char b8x() { vector bool char a; return vec_xor(a,a); }
+vector bool char b8a() { vector bool char a; return vec_andc(a,a); }
+
+static void test()
+{
+ static vector unsigned int zerov;
+#define zcheck(val, tag) \
+ check(vec_all_eq((vector unsigned int)(val), zerov), tag)
+
+ zcheck(zs16(), "zs16");
+ zcheck(s16ss(), "s16ss");
+ zcheck(s16s(), "s16s");
+ zcheck(s16x(), "s16x");
+ zcheck(s16a(), "s16a");
+ zcheck(zu16(), "zu16");
+ zcheck(u16ss(), "u16ss");
+ zcheck(u16s(), "u16s");
+ zcheck(u16x(), "u16x");
+ zcheck(u16a(), "u16a");
+ zcheck(zs32(), "zs32");
+ zcheck(s32ss(), "s32ss");
+ zcheck(s32s(), "s32s");
+ zcheck(s32x(), "s32x");
+ zcheck(s32a(), "s32a");
+ zcheck(zu32(), "zu32");
+ zcheck(u32ss(), "u32ss");
+ zcheck(u32s(), "u32s");
+ zcheck(u32x(), "u32x");
+ zcheck(u32a(), "u32a");
+ zcheck(zs8(), "zs8");
+ zcheck(s8ss(), "s8ss");
+ zcheck(s8s(), "s8s");
+ zcheck(s8x(), "s8x");
+ zcheck(s8a(), "s8a");
+ zcheck(zu8(), "zu8");
+ zcheck(u8ss(), "u8ss");
+ zcheck(u8s(), "u8s");
+ zcheck(u8x(), "u8x");
+ zcheck(u8a(), "u8a");
+ zcheck(zp16(), "zp16");
+ zcheck(zb16(), "zb16");
+ zcheck(b16x(), "b16x");
+ zcheck(b16a(), "b16a");
+ zcheck(zb32(), "zb32");
+ zcheck(b32x(), "b32x");
+ zcheck(b32a(), "b32a");
+ zcheck(zb8(), "zb8");
+ zcheck(b8x(), "b8x");
+ zcheck(b8a(), "b8a");
+}