summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRaghuveer Devulapalli <raghuveer.devulapalli@intel.com>2022-01-18 22:08:30 -0800
committerRaghuveer Devulapalli <raghuveer.devulapalli@intel.com>2022-01-20 13:45:02 -0800
commit60e7c19bf9b005365633bf10c4f9dba7c6a75bc2 (patch)
tree82a30a1d5cf1e1bb72d9bb8044e5d95f9c5e4f46
parentcf60656b7551e37d21ccf30fee770a3417e59373 (diff)
downloadnumpy-60e7c19bf9b005365633bf10c4f9dba7c6a75bc2.tar.gz
MAINT: Use new CPU dispatch mechanism
-rw-r--r--numpy/core/setup.py2
-rw-r--r--numpy/core/src/npysort/quicksort.c.src17
-rw-r--r--numpy/core/src/npysort/x86-qsort.dispatch.c.src (renamed from numpy/core/src/npysort/qsort-32bit-avx512.h.src)65
-rw-r--r--numpy/core/src/npysort/x86-qsort.h18
4 files changed, 69 insertions, 33 deletions
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index c952f9a3b..92297509e 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -946,7 +946,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'vdot.c'),
join('src', 'common', 'npy_sort.h.src'),
- join('src', 'npysort', 'qsort-32bit-avx512.h.src'),
+ join('src', 'npysort', 'x86-qsort.dispatch.c.src'),
join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'timsort.c.src'),
diff --git a/numpy/core/src/npysort/quicksort.c.src b/numpy/core/src/npysort/quicksort.c.src
index f22360169..b4b060720 100644
--- a/numpy/core/src/npysort/quicksort.c.src
+++ b/numpy/core/src/npysort/quicksort.c.src
@@ -52,9 +52,13 @@
#include "npy_sort.h"
#include "npysort_common.h"
#include "npy_cpu_features.h"
-#include "qsort-32bit-avx512.h"
+#include "x86-qsort.h"
#include <stdlib.h>
+#ifndef NPY_DISABLE_OPTIMIZATION
+ #include "x86-qsort.dispatch.h"
+#endif
+
#define NOT_USED NPY_UNUSED(unused)
/*
* pushing largest partition has upper bound of log2(n) space
@@ -91,14 +95,15 @@
NPY_NO_EXPORT int
quicksort_@suff@(void *start, npy_intp num, void *NOT_USED)
{
+
#if @AVX512@
-#if defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS
- if(NPY_CPU_HAVE(AVX512_SKX)) {
- avx512_qsort_@suff@(start, num);
- return 0;
+ void (*dispfunc)(void*, npy_intp) = NULL;
+ NPY_CPU_DISPATCH_CALL_XB(dispfunc = &x86_quicksort_@suff@);
+ if (dispfunc) {
+ (*dispfunc)(start, num);
+ return 0;
}
#endif
-#endif
@type@ vp;
@type@ *pl = start;
diff --git a/numpy/core/src/npysort/qsort-32bit-avx512.h.src b/numpy/core/src/npysort/x86-qsort.dispatch.c.src
index cd0e08b6d..938495c59 100644
--- a/numpy/core/src/npysort/qsort-32bit-avx512.h.src
+++ b/numpy/core/src/npysort/x86-qsort.dispatch.c.src
@@ -1,6 +1,18 @@
-#if defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS
+/*@targets
+ * $maxopt $keep_baseline avx512_skx
+ */
// The $keep_baseline policy is used to avoid skipping the build of avx512_skx
// when it is part of the baseline features (--cpu-baseline), since the
// 'baseline' option isn't specified within the targets.
+
+#include "x86-qsort.h"
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#ifdef NPY_HAVE_AVX512_SKX
#include <immintrin.h>
#include "numpy/npy_math.h"
+#include "npy_sort.h"
+
/*
* Quicksort using AVX-512 for int, uint32 and float. The ideas and code are
@@ -51,7 +63,7 @@
#define VROTL(x, k) /* rotate each uint64_t value in vector */ \
_mm256_or_si256(_mm256_slli_epi64((x),(k)),_mm256_srli_epi64((x),64-(k)))
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
__m256i vnext(__m256i* s0, __m256i* s1) {
*s1 = _mm256_xor_si256(*s0, *s1); /* modify vectors s1 and s0 */
*s0 = _mm256_xor_si256(_mm256_xor_si256(VROTL(*s0, 24), *s1),
@@ -61,7 +73,7 @@ __m256i vnext(__m256i* s0, __m256i* s1) {
}
/* transform random numbers to the range between 0 and bound - 1 */
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
__m256i rnd_epu32(__m256i rnd_vec, __m256i bound) {
__m256i even = _mm256_srli_epi64(_mm256_mul_epu32(rnd_vec, bound), 32);
__m256i odd = _mm256_mul_epu32(_mm256_srli_epi64(rnd_vec, 32), bound);
@@ -96,7 +108,7 @@ __m256i rnd_epu32(__m256i rnd_vec, __m256i bound) {
a = _mm256_min_@vsuf1@(a, b); \
b = _mm256_max_@vsuf1@(temp, b);} \
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
@zmm_t@ cmp_merge_@vsuf1@(@zmm_t@ in1, @zmm_t@ in2, __mmask16 mask)
{
@zmm_t@ min = _mm512_min_@vsuf1@(in2, in1);
@@ -108,7 +120,7 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
* Assumes zmm is random and performs a full sorting network defined in
* https://en.wikipedia.org/wiki/Bitonic_sorter#/media/File:BitonicSort.svg
*/
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
@zmm_t@ sort_zmm_@vsuf1@(@zmm_t@ zmm)
{
zmm = cmp_merge_@vsuf1@(zmm, SHUFFLE_@vsuf2@(zmm, SHUFFLE_MASK(2,3,0,1)), 0xAAAA);
@@ -125,7 +137,7 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
}
// Assumes zmm is bitonic and performs a recursive half cleaner
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
@zmm_t@ bitonic_merge_zmm_@vsuf1@(@zmm_t@ zmm)
{
// 1) half_cleaner[16]: compare 1-9, 2-10, 3-11 etc ..
@@ -140,7 +152,7 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
}
// Assumes zmm1 and zmm2 are sorted and performs a recursive half cleaner
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
void bitonic_merge_two_zmm_@vsuf1@(@zmm_t@* zmm1, @zmm_t@* zmm2)
{
// 1) First step of a merging network: coex of zmm1 and zmm2 reversed
@@ -153,7 +165,7 @@ void bitonic_merge_two_zmm_@vsuf1@(@zmm_t@* zmm1, @zmm_t@* zmm2)
}
// Assumes [zmm0, zmm1] and [zmm2, zmm3] are sorted and performs a recursive half cleaner
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
void bitonic_merge_four_zmm_@vsuf1@(@zmm_t@* zmm)
{
@zmm_t@ zmm2r = _mm512_permutexvar_@vsuf2@(_mm512_set_epi32(NETWORK5), zmm[2]);
@@ -172,7 +184,7 @@ void bitonic_merge_four_zmm_@vsuf1@(@zmm_t@* zmm)
zmm[3] = bitonic_merge_zmm_@vsuf1@(zmm3);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
void bitonic_merge_eight_zmm_@vsuf1@(@zmm_t@* zmm)
{
@zmm_t@ zmm4r = _mm512_permutexvar_@vsuf2@(_mm512_set_epi32(NETWORK5), zmm[4]);
@@ -205,7 +217,7 @@ void bitonic_merge_eight_zmm_@vsuf1@(@zmm_t@* zmm)
zmm[7] = bitonic_merge_zmm_@vsuf1@(zmm_t8);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
void sort_16_@vsuf1@(@type_t@* arr, npy_int N)
{
__mmask16 load_mask = (0x0001 << N) - 0x0001;
@@ -213,7 +225,7 @@ void sort_16_@vsuf1@(@type_t@* arr, npy_int N)
_mm512_mask_storeu_@vsuf2@(arr, load_mask, sort_zmm_@vsuf1@(zmm));
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
void sort_32_@vsuf1@(@type_t@* arr, npy_int N)
{
if (N <= 16) {
@@ -230,7 +242,7 @@ void sort_32_@vsuf1@(@type_t@* arr, npy_int N)
_mm512_mask_storeu_@vsuf2@(arr + 16, load_mask, zmm2);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
void sort_64_@vsuf1@(@type_t@* arr, npy_int N)
{
if (N <= 32) {
@@ -263,7 +275,7 @@ void sort_64_@vsuf1@(@type_t@* arr, npy_int N)
_mm512_mask_storeu_@vsuf2@(arr + 48, load_mask2, zmm[3]);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
void sort_128_@vsuf1@(@type_t@* arr, npy_int N)
{
if (N <= 64) {
@@ -325,7 +337,7 @@ void sort_128_@vsuf1@(@type_t@* arr, npy_int N)
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
void swap_@TYPE@(@type_t@ *arr, npy_intp ii, npy_intp jj) {
@type_t@ temp = arr[ii];
arr[ii] = arr[jj];
@@ -333,7 +345,7 @@ void swap_@TYPE@(@type_t@ *arr, npy_intp ii, npy_intp jj) {
}
// Median of 3 strategy
-//static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+//static NPY_INLINE NPY_GCC_OPT_3
//npy_intp get_pivot_index(@type_t@ *arr, const npy_intp left, const npy_intp right) {
// return (rand() % (right + 1 - left)) + left;
// //npy_intp middle = ((right-left)/2) + left;
@@ -350,7 +362,7 @@ void swap_@TYPE@(@type_t@ *arr, npy_intp ii, npy_intp jj) {
* Picking the pivot: Median of 72 array elements chosen at random.
*/
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
@type_t@ get_pivot_@vsuf1@(@type_t@ *arr, const npy_intp left, const npy_intp right) {
/* seeds for vectorized random number generator */
__m256i s0 = _mm256_setr_epi64x(8265987198341093849, 3762817312854612374,
@@ -402,7 +414,7 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
* Partition one ZMM register based on the pivot and return the index of the
* last element that is less than or equal to the pivot.
*/
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
npy_int partition_vec_@vsuf1@(@type_t@* arr, npy_intp left, npy_intp right,
const @zmm_t@ curr_vec, const @zmm_t@ pivot_vec,
@zmm_t@* smallest_vec, @zmm_t@* biggest_vec)
@@ -421,7 +433,7 @@ npy_int partition_vec_@vsuf1@(@type_t@* arr, npy_intp left, npy_intp right,
* Partition an array based on the pivot and return the index of the
* last element that is less than or equal to the pivot.
*/
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
npy_intp partition_avx512_@vsuf1@(@type_t@* arr, npy_intp left, npy_intp right,
@type_t@ pivot, @type_t@* smallest, @type_t@* biggest)
{
@@ -491,7 +503,7 @@ npy_intp partition_avx512_@vsuf1@(@type_t@* arr, npy_intp left, npy_intp right,
return l_store;
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
void qsort_@type@(@type_t@* arr, npy_intp left, npy_intp right, npy_int max_iters)
{
/*
@@ -520,7 +532,7 @@ void qsort_@type@(@type_t@* arr, npy_intp left, npy_intp right, npy_int max_iter
}
/**end repeat**/
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
npy_intp replace_nan_with_inf(npy_float* arr, npy_intp arrsize)
{
npy_intp nan_count = 0;
@@ -539,7 +551,7 @@ npy_intp replace_nan_with_inf(npy_float* arr, npy_intp arrsize)
return nan_count;
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512_SKX
+static NPY_INLINE NPY_GCC_OPT_3
void replace_inf_with_nan(npy_float* arr, npy_intp arrsize, npy_intp nan_count)
{
for (npy_intp ii = arrsize-1; nan_count > 0; --ii) {
@@ -555,18 +567,19 @@ void replace_inf_with_nan(npy_float* arr, npy_intp arrsize, npy_intp nan_count)
* #FIXNAN = 0, 0, 1#
*/
-void avx512_qsort_@type@(@type_t@* arr, npy_intp arrsize)
+NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(x86_quicksort_@type@)
+(void* arr, npy_intp arrsize)
{
if (arrsize > 1) {
#if @FIXNAN@
- npy_intp nan_count = replace_nan_with_inf(arr, arrsize);
+ npy_intp nan_count = replace_nan_with_inf((@type_t@*) arr, arrsize);
#endif
- qsort_@type@(arr, 0, arrsize-1, 2*log2(arrsize));
+ qsort_@type@((@type_t@*) arr, 0, arrsize-1, 2*log2(arrsize));
#if @FIXNAN@
- replace_inf_with_nan(arr, arrsize, nan_count);
+ replace_inf_with_nan((@type_t@*) arr, arrsize, nan_count);
#endif
}
}
/**end repeat**/
-#endif // HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS
+#endif // NPY_HAVE_AVX512_SKX
diff --git a/numpy/core/src/npysort/x86-qsort.h b/numpy/core/src/npysort/x86-qsort.h
new file mode 100644
index 000000000..8cb8e3654
--- /dev/null
+++ b/numpy/core/src/npysort/x86-qsort.h
@@ -0,0 +1,18 @@
+#include "numpy/npy_common.h"
+#include "npy_cpu_dispatch.h"
+
+#ifndef NPY_NO_EXPORT
+ #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN
+#endif
+
+#ifndef NPY_DISABLE_OPTIMIZATION
+ #include "x86-qsort.dispatch.h"
+#endif
+NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void x86_quicksort_int,
+ (void *start, npy_intp num))
+
+NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void x86_quicksort_uint,
+ (void *start, npy_intp num))
+
+NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void x86_quicksort_float,
+ (void *start, npy_intp num))