Diffstat (limited to 'libstdc++/std/valarray_array.h')
-rw-r--r--  libstdc++/std/valarray_array.h  260
1 file changed, 226 insertions, 34 deletions
diff --git a/libstdc++/std/valarray_array.h b/libstdc++/std/valarray_array.h
index a0b5818fd8b..eb66463708e 100644
--- a/libstdc++/std/valarray_array.h
+++ b/libstdc++/std/valarray_array.h
@@ -34,39 +34,205 @@
 #include <cstdlib>
 #include <cstring>
+#include <std/cpp_type_traits.h>
 
 extern "C++" {
 
 //
 // Helper functions on raw pointers
 //
-
-// fill plain array __a[<__n>] with __t
-template<typename _Tp>
-inline void
-__valarray_fill (_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
-{ while (__n--) *__a++ = __t; }
-
-// fill strided array __a[<__n-1 : __s>] with __t
-template<typename _Tp>
-inline void
-__valarray_fill (_Tp* __restrict__ __a, size_t __n,
-                 size_t __s, const _Tp& __t)
-{ for (size_t __i=0; __i<__n; ++__i, __a+=__s) *__a = __t; }
-
-// fill indirect array __a[__i[<__n>]] with __i
-template<typename _Tp>
-inline void
-__valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
-                size_t __n, const _Tp& __t)
-{ for (size_t __j=0; __j<__n; ++__j, ++__i) __a[*__i] = __t; }
-
-// copy plain array __a[<__n>] in __b[<__n>]
-template<typename _Tp>
-inline void
-__valarray_copy (const _Tp* __restrict__ __a, size_t __n,
-                 _Tp* __restrict__ __b)
-{ memcpy (__b, __a, __n * sizeof(_Tp)); }
+
+  inline void*
+  __valarray_get_memory(size_t __n)
+  { return operator new(__n); }
+
+  template<typename _Tp>
+  inline _Tp*__restrict__
+  __valarray_get_storage(size_t __n)
+  {
+    return static_cast<_Tp*__restrict__>
+      (__valarray_get_memory(__n * sizeof(_Tp)));
+  }
+
+  // Return memory to the system
+  inline void
+  __valarray_release_storage(void* __p)
+  { operator delete(__p); }
+
+  // Turn raw memory into an array of _Tp filled with _Tp()
+  // This is required in 'valarray<T> v(n);'
+  template<typename _Tp, bool>
+  struct _Array_default_ctor
+  {
+    // Please note that this isn't exception safe.  But
+    // valarrays aren't required to be exception safe.
+    inline static void
+    _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
+    { while (__b != __e) new(__b++) _Tp(); }
+  };
+
+  template<typename _Tp>
+  struct _Array_default_ctor<_Tp, true>
+  {
+    // For fundamental types, it suffices to say 'memset()'
+    inline static void
+    _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
+    { memset(__b, 0, (__e - __b)*sizeof(_Tp)); }
+  };
+
+  template<typename _Tp>
+  inline void
+  __valarray_default_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
+  {
+    _Array_default_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
+      _S_do_it(__b, __e);
+  }
+
+  // Turn raw memory into an array of _Tp filled with __t
+  // This is required in 'valarray<T> v(n, t)'.  Also
+  // used in valarray<>::resize().
+  template<typename _Tp, bool>
+  struct _Array_init_ctor
+  {
+    // Please note that this isn't exception safe.  But
+    // valarrays aren't required to be exception safe.
+    inline static void
+    _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
+    { while (__b != __e) new(__b++) _Tp(__t); }
+  };
+
+  template<typename _Tp>
+  struct _Array_init_ctor<_Tp, true>
+  {
+    inline static void
+    _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
+    { while (__b != __e) *__b++ = __t; }
+  };
+
+  template<typename _Tp>
+  inline void
+  __valarray_fill_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e,
+                            const _Tp __t)
+  {
+    _Array_init_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
+      _S_do_it(__b, __e, __t);
+  }
+
+  //
+  // copy-construct raw array [__o, *) from plain array [__b, __e)
+  // We can't just say 'memcpy()'
+  //
+  template<typename _Tp, bool>
+  struct _Array_copy_ctor
+  {
+    // Please note that this isn't exception safe.  But
+    // valarrays aren't required to be exception safe.
+    inline static void
+    _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
+             _Tp* __restrict__ __o)
+    { while (__b != __e) new(__o++) _Tp(*__b++); }
+  };
+
+  template<typename _Tp>
+  struct _Array_copy_ctor<_Tp, true>
+  {
+    inline static void
+    _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
+             _Tp* __restrict__ __o)
+    { memcpy(__o, __b, (__e - __b)*sizeof(_Tp)); }
+  };
+
+  template<typename _Tp>
+  inline void
+  __valarray_copy_construct(const _Tp* __restrict__ __b,
+                            const _Tp* __restrict__ __e,
+                            _Tp* __restrict__ __o)
+  {
+    _Array_copy_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
+      _S_do_it(__b, __e, __o);
+  }
+
+  // copy-construct raw array [__o, *) from strided array __a[<__n : __s>]
+  template<typename _Tp>
+  inline void
+  __valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n,
+                             size_t __s, _Tp* __restrict__ __o)
+  {
+    if (__is_fundamental<_Tp>::_M_type)
+      while (__n--) { *__o++ = *__a; __a += __s; }
+    else
+      while (__n--) { new(__o++) _Tp(*__a); __a += __s; }
+  }
+
+  // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]]
+  template<typename _Tp>
+  inline void
+  __valarray_copy_construct (const _Tp* __restrict__ __a,
+                             const size_t* __restrict__ __i,
+                             _Tp* __restrict__ __o, size_t __n)
+  {
+    if (__is_fundamental<_Tp>::_M_type)
+      while (__n--) *__o++ = __a[*__i++];
+    else
+      while (__n--) new (__o++) _Tp(__a[*__i++]);
+  }
+
+  // Do the necessary cleanup when we're done with arrays.
+  template<typename _Tp>
+  inline void
+  __valarray_destroy_elements(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
+  {
+    if (!__is_fundamental<_Tp>::_M_type)
+      while (__b != __e) { __b->~_Tp(); ++__b; }
+  }
+
+
+  // fill plain array __a[<__n>] with __t
+  template<typename _Tp>
+  inline void
+  __valarray_fill (_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
+  { while (__n--) *__a++ = __t; }
+
+  // fill strided array __a[<__n-1 : __s>] with __t
+  template<typename _Tp>
+  inline void
+  __valarray_fill (_Tp* __restrict__ __a, size_t __n,
+                   size_t __s, const _Tp& __t)
+  { for (size_t __i=0; __i<__n; ++__i, __a+=__s) *__a = __t; }
+
+  // fill indirect array __a[__i[<__n>]] with __t
+  template<typename _Tp>
+  inline void
+  __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
+                  size_t __n, const _Tp& __t)
+  { for (size_t __j=0; __j<__n; ++__j, ++__i) __a[*__i] = __t; }
+
+  // copy plain array __a[<__n>] in __b[<__n>]
+  // For non-fundamental types, it is wrong to say 'memcpy()'
+  template<typename _Tp, bool>
+  struct _Array_copier
+  {
+    inline static void
+    _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
+    { while (__n--) *__b++ = *__a++; }
+  };
+
+  template<typename _Tp>
+  struct _Array_copier<_Tp, true>
+  {
+    inline static void
+    _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
+    { memcpy (__b, __a, __n * sizeof (_Tp)); }
+  };
+
+  template<typename _Tp>
+  inline void
+  __valarray_copy (const _Tp* __restrict__ __a, size_t __n,
+                   _Tp* __restrict__ __b)
+  {
+    _Array_copier<_Tp, __is_fundamental<_Tp>::_M_type>::
+      _S_do_it(__a, __n, __b);
+  }
 
 // copy strided array __a[<__n : __s>] in plain __b[<__n>]
 template<typename _Tp>
@@ -97,6 +263,34 @@ __valarray_copy (const _Tp* __restrict__ __a, size_t __n,
                  _Tp* __restrict__ __b, const size_t* __restrict__ __i)
 { for (size_t __j=0; __j<__n; ++__j, ++__a, ++__i) __b[*__i] = *__a; }
 
+  //
+  // Compute the sum of elements in range [__f, __l)
+  // This is a naive algorithm.  It suffers from cancellation.
+  // In the future try to specialize
+  // for _Tp = float, double, long double using a more accurate
+  // algorithm.
+  //
+  template<typename _Tp>
+  inline _Tp
+  __valarray_sum(const _Tp* __restrict__ __f, const _Tp* __restrict__ __l)
+  {
+    _Tp __r = _Tp();
+    while (__f != __l) __r = __r + *__f++;
+    return __r;
+  }
+
+  // Compute the product of all elements in range [__f, __l)
+  template<typename _Tp>
+  _Tp
+  __valarray_product(const _Tp* __restrict__ __f,
+                     const _Tp* __restrict__ __l)
+  {
+    _Tp __r = _Tp(1);
+    while (__f != __l) __r = __r * *__f++;
+    return __r;
+  }
+
+
 //
 // Helper class _Array, first layer of valarray abstraction.
 // All operations on valarray should be forwarded to this class
@@ -110,7 +304,6 @@ template<typename _Tp> struct _Array {
     explicit _Array (const valarray<_Tp>&);
     _Array (const _Tp* __restrict__, size_t);
-    void free_data() const;
     _Tp* begin () const;
 
     _Tp* const __restrict__ _M_data;
@@ -161,7 +354,9 @@ __valarray_copy (_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
 template<typename _Tp>
 inline
-_Array<_Tp>::_Array (size_t __n) : _M_data (new _Tp[__n]) {}
+_Array<_Tp>::_Array (size_t __n)
+  : _M_data (__valarray_get_storage<_Tp>(__n))
+{ __valarray_default_construct(_M_data, _M_data + __n); }
 
 template<typename _Tp>
 inline
@@ -174,11 +369,8 @@ inline _Array<_Tp>::_Array (const valarray<_Tp>& __v)
 template<typename _Tp>
 inline
 _Array<_Tp>::_Array (const _Tp* __restrict__ __b, size_t __s)
-  : _M_data (new _Tp[__s]) { __valarray_copy (__b, __s, _M_data); }
-
-template<typename _Tp>
-inline void
-_Array<_Tp>::free_data() const { delete[] _M_data; }
+  : _M_data (__valarray_get_storage<_Tp>(__s))
+{ __valarray_copy_construct(__b, __s, _M_data); }
 
 template<typename _Tp>
 inline _Tp*
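
Note: the patch replaces the single `new _Tp[__n]` with a split lifecycle: grab raw bytes (`__valarray_get_storage`), placement-construct elements into them (`__valarray_default_construct` and friends), later run destructors explicitly (`__valarray_destroy_elements`), and hand the bytes back (`__valarray_release_storage`). A minimal standalone sketch of that lifecycle follows; the `Widget` type and the driver are illustrative assumptions, not part of the patch.

#include <cstddef>
#include <new>

// Hypothetical element type, used only to show where constructor
// and destructor calls happen; not from the patch.
struct Widget
{
    Widget() {}
    ~Widget() {}
};

int main()
{
    const std::size_t n = 8;

    // 1. Raw, uninitialized bytes -- mirrors __valarray_get_storage<_Tp>(n).
    //    No Widget constructor has run yet.
    void* raw = operator new(n * sizeof(Widget));
    Widget* p = static_cast<Widget*>(raw);

    // 2. Placement-new each element, as __valarray_default_construct
    //    does for non-fundamental types.
    for (std::size_t i = 0; i < n; ++i)
        new (p + i) Widget();

    // ... use p[0] .. p[n-1] ...

    // 3. Destroy explicitly -- operator delete will not run destructors.
    //    Mirrors __valarray_destroy_elements.
    for (std::size_t i = 0; i < n; ++i)
        p[i].~Widget();

    // 4. Return the bytes -- mirrors __valarray_release_storage.
    operator delete(raw);
    return 0;
}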
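All four dispatch helpers (`_Array_default_ctor`, `_Array_init_ctor`, `_Array_copy_ctor`, `_Array_copier`) follow one pattern: a primary class template with an unnamed `bool` parameter does the generic element-wise work, and a partial specialization on `true` substitutes `memset()`/`memcpy()`; `__is_fundamental<_Tp>::_M_type` from `<std/cpp_type_traits.h>` supplies the flag. The sketch below reproduces the pattern with a stand-in trait, since `__is_fundamental` is internal to libstdc++; the names here are assumptions for illustration.

#include <cstddef>
#include <cstring>

// Stand-in for the internal __is_fundamental trait (assumed names).
template<typename _Tp> struct is_fund         { static const bool value = false; };
template<>             struct is_fund<int>    { static const bool value = true; };
template<>             struct is_fund<double> { static const bool value = true; };

// Primary template: generic, element-by-element work.
template<typename _Tp, bool>
struct zero_helper
{
    static void do_it(_Tp* b, _Tp* e)
    { while (b != e) *b++ = _Tp(); }
};

// Partial specialization selected when the flag is true:
// fundamental types may be zeroed wholesale with memset().
template<typename _Tp>
struct zero_helper<_Tp, true>
{
    static void do_it(_Tp* b, _Tp* e)
    { std::memset(b, 0, (e - b) * sizeof(_Tp)); }
};

// Dispatcher: the trait picks the branch at compile time, so
// zero_fill on an int array compiles to memset, while a class
// type gets the assignment loop.
template<typename _Tp>
inline void zero_fill(_Tp* b, _Tp* e)
{ zero_helper<_Tp, is_fund<_Tp>::value>::do_it(b, e); }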
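The comment "We can't just say 'memcpy()'" is the crux of `_Array_copy_ctor`: bitwise copy bypasses user-defined copy constructors. Below is a hypothetical type (not from the patch) that `memcpy()` would break, together with the placement-new copy the generic branch performs instead.

#include <new>

// Hypothetical resource-owning type.  memcpy'ing one Owner over raw
// bytes would duplicate p, and both destructors would then delete
// the same int.
struct Owner
{
    int* p;
    Owner() : p(new int(0)) {}
    Owner(const Owner& o) : p(new int(*o.p)) {}   // deep copy required
    ~Owner() { delete p; }
};

int main()
{
    Owner a;

    // What _Array_copy_ctor's generic branch does instead of memcpy():
    // run the copy constructor into raw storage via placement new.
    void* raw = operator new(sizeof(Owner));
    Owner* b = new (raw) Owner(a);    // deep copy: b->p != a.p

    b->~Owner();                      // each object frees its own int
    operator delete(raw);
    return 0;
}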
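Finally, the comment above `__valarray_sum` anticipates a later, more accurate specialization for `float`, `double`, and `long double`. Compensated (Kahan) summation is one candidate; the sketch below is an assumption about what such a specialization might do, not code from this patch.

// Compensated (Kahan) summation -- one possible "more accurate
// algorithm" for a floating-point __valarray_sum specialization.
// Illustrative only; the patch ships the naive accumulation loop.
double kahan_sum(const double* f, const double* l)
{
    double sum = 0.0;   // running total
    double c   = 0.0;   // compensation: low-order bits lost so far
    while (f != l)
    {
        double y = *f++ - c;    // feed back the previously lost bits
        double t = sum + y;     // big + small: low bits of y may drop
        c = (t - sum) - y;      // algebraically zero; captures the loss
        sum = t;
    }
    return sum;
}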