Diffstat (limited to 'libitm/config/x86')
-rw-r--r-- | libitm/config/x86/cacheline.cc | 73
-rw-r--r-- | libitm/config/x86/cacheline.h | 242
-rw-r--r-- | libitm/config/x86/sjlj.S | 105
-rw-r--r-- | libitm/config/x86/target.h | 98
-rw-r--r-- | libitm/config/x86/unaligned.h | 237
-rw-r--r-- | libitm/config/x86/x86_avx.cc | 95
-rw-r--r-- | libitm/config/x86/x86_sse.cc | 122
7 files changed, 972 insertions, 0 deletions
diff --git a/libitm/config/x86/cacheline.cc b/libitm/config/x86/cacheline.cc
new file mode 100644
index 00000000000..2e49a355953
--- /dev/null
+++ b/libitm/config/x86/cacheline.cc
@@ -0,0 +1,73 @@
+/* Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+   Contributed by Richard Henderson <rth@redhat.com>.
+
+   This file is part of the GNU Transactional Memory Library (libitm).
+
+   Libitm is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "libitm_i.h"
+
+namespace GTM HIDDEN {
+
+uint32_t const gtm_bit_to_byte_mask[16] =
+{
+  0x00000000,
+  0x000000ff,
+  0x0000ff00,
+  0x0000ffff,
+  0x00ff0000,
+  0x00ff00ff,
+  0x00ffff00,
+  0x00ffffff,
+  0xff000000,
+  0xff0000ff,
+  0xff00ff00,
+  0xff00ffff,
+  0xffff0000,
+  0xffff00ff,
+  0xffffff00,
+  0xffffffff
+};
+
+#ifdef __SSE2__
+# define MEMBER m128i
+#else
+# define MEMBER w
+#endif
+
+void
+gtm_cacheline::copy_mask (gtm_cacheline * __restrict d,
+                          const gtm_cacheline * __restrict s,
+                          gtm_cacheline_mask m)
+{
+  if (m == (gtm_cacheline_mask)-1)
+    {
+      *d = *s;
+      return;
+    }
+  if (__builtin_expect (m == 0, 0))
+    return;
+
+  size_t n = sizeof(d->MEMBER[0]);
+  for (size_t i = 0; i < CACHELINE_SIZE / n; ++i, m >>= n)
+    store_mask (&d->MEMBER[i], s->MEMBER[i], m);
+}
+
+} // namespace GTM
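For reference, the table above expands a 4-bit mask into a 32-bit byte mask: bit i of the index selects byte i of the result. A scalar sketch of the store_mask operation that copy_mask delegates to (the helper name is invented here, and it assumes the gtm_bit_to_byte_mask table above is in scope; the real overloads follow in cacheline.h below):

  // Store only the bytes of S selected by the low four bits of M into *D.
  static void reference_store_mask (uint32_t *d, uint32_t s, uint8_t m)
  {
    uint32_t bm = gtm_bit_to_byte_mask[m & 15];  // expand 1 bit -> 1 byte
    *d = (*d & ~bm) | (s & bm);                  // splice new bytes over old
  }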
diff --git a/libitm/config/x86/cacheline.h b/libitm/config/x86/cacheline.h
new file mode 100644
index 00000000000..15a95b0be5b
--- /dev/null
+++ b/libitm/config/x86/cacheline.h
@@ -0,0 +1,242 @@
+/* Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+   Contributed by Richard Henderson <rth@redhat.com>.
+
+   This file is part of the GNU Transactional Memory Library (libitm).
+
+   Libitm is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef LIBITM_CACHELINE_H
+#define LIBITM_CACHELINE_H 1
+
+// Minimum cacheline size is 32, due to both complex long double and __m256.
+// There's no requirement that 64-bit use a 64-byte cacheline size, but do
+// so for now to make sure everything is parameterized properly.
+#ifdef __x86_64__
+# define CACHELINE_SIZE 64
+#else
+# define CACHELINE_SIZE 32
+#endif
+
+namespace GTM HIDDEN {
+
+// A gtm_cacheline_mask stores a modified bit for every modified byte
+// in the cacheline with which it is associated.
+typedef sized_integral<CACHELINE_SIZE / 8>::type gtm_cacheline_mask;
+
+extern uint32_t const gtm_bit_to_byte_mask[16];
+
+union gtm_cacheline
+{
+  // Byte access to the cacheline.
+  unsigned char b[CACHELINE_SIZE] __attribute__((aligned(CACHELINE_SIZE)));
+
+  // Larger sized access to the cacheline.
+  uint16_t u16[CACHELINE_SIZE / sizeof(uint16_t)];
+  uint32_t u32[CACHELINE_SIZE / sizeof(uint32_t)];
+  uint64_t u64[CACHELINE_SIZE / sizeof(uint64_t)];
+  gtm_word w[CACHELINE_SIZE / sizeof(gtm_word)];
+
+#ifdef __MMX__
+  __m64 m64[CACHELINE_SIZE / sizeof(__m64)];
+#endif
+#ifdef __SSE__
+  __m128 m128[CACHELINE_SIZE / sizeof(__m128)];
+#endif
+#ifdef __SSE2__
+  __m128i m128i[CACHELINE_SIZE / sizeof(__m128i)];
+#endif
+#ifdef __AVX__
+  __m256 m256[CACHELINE_SIZE / sizeof(__m256)];
+  __m256i m256i[CACHELINE_SIZE / sizeof(__m256i)];
+#endif
+
+  // Store S into D, but only the bytes specified by M.
+  static void store_mask (uint32_t *d, uint32_t s, uint8_t m);
+  static void store_mask (uint64_t *d, uint64_t s, uint8_t m);
+#ifdef __SSE2__
+  static void store_mask (__m128i *d, __m128i s, uint16_t m);
+#endif
+
+  // Copy S to D, but only the bytes specified by M.
+  static void copy_mask (gtm_cacheline * __restrict d,
+                         const gtm_cacheline * __restrict s,
+                         gtm_cacheline_mask m);
+
+  // A write barrier to emit after (a series of) copy_mask.
+  // When we're emitting non-temporal stores, the normal strong
+  // ordering of the machine doesn't apply.
+  static void copy_mask_wb ();
+
+#if defined(__SSE__) || defined(__AVX__)
+  // Copy S to D; only bother defining if we can do this more efficiently
+  // than the compiler-generated default implementation.
+  gtm_cacheline& operator= (const gtm_cacheline &s);
+#endif // SSE, AVX
+};
+
+inline void
+gtm_cacheline::copy_mask_wb ()
+{
+#ifdef __SSE2__
+  _mm_sfence ();
+#endif
+}
+
+#if defined(__SSE__) || defined(__AVX__)
+inline gtm_cacheline& ALWAYS_INLINE
+gtm_cacheline::operator= (const gtm_cacheline & __restrict s)
+{
+#ifdef __AVX__
+# define CP m256
+# define TYPE __m256
+#else
+# define CP m128
+# define TYPE __m128
+#endif
+
+  TYPE w, x, y, z;
+
+  // ??? Wouldn't it be nice to have a pragma to tell the compiler
+  // to completely unroll a given loop?
+  switch (CACHELINE_SIZE / sizeof(TYPE))
+    {
+    case 1:
+      this->CP[0] = s.CP[0];
+      break;
+    case 2:
+      x = s.CP[0];
+      y = s.CP[1];
+      this->CP[0] = x;
+      this->CP[1] = y;
+      break;
+    case 4:
+      w = s.CP[0];
+      x = s.CP[1];
+      y = s.CP[2];
+      z = s.CP[3];
+      this->CP[0] = w;
+      this->CP[1] = x;
+      this->CP[2] = y;
+      this->CP[3] = z;
+      break;
+    default:
+      __builtin_trap ();
+    }
+
+  return *this;
+}
+#endif
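The copy_mask/copy_mask_wb pairing matters because the SSE2 store_mask below ends in maskmoveu, a non-temporal store that bypasses the machine's usual strong store ordering. A hedged usage sketch (the log record type is invented for illustration):

  // Flush a batch of dirty cachelines, then publish with a single sfence.
  struct dirty_entry { gtm_cacheline *dst; const gtm_cacheline *src;
                       gtm_cacheline_mask mask; };

  static void flush_log (dirty_entry *log, size_t n)
  {
    for (size_t i = 0; i < n; ++i)
      gtm_cacheline::copy_mask (log[i].dst, log[i].src, log[i].mask);
    gtm_cacheline::copy_mask_wb ();  // one barrier for the whole series
  }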
+
+// ??? Support masked integer stores more efficiently with an unlocked cmpxchg
+// insn.  My reasoning is that while we write to locations that we do not wish
+// to modify, we do it in an uninterruptible insn, and so we either truly
+// write back the original data or the insn fails -- unlike with a
+// load/and/or/write sequence which can be interrupted either by a kernel
+// task switch or an unlucky cacheline steal by another processor.  Avoiding
+// the LOCK prefix improves performance by a factor of 10, and we don't need
+// the memory barrier semantics implied by that prefix.
+
+inline void ALWAYS_INLINE
+gtm_cacheline::store_mask (uint32_t *d, uint32_t s, uint8_t m)
+{
+  gtm_cacheline_mask tm = (1 << sizeof (s)) - 1;
+  if (__builtin_expect (m & tm, tm))
+    {
+      if (__builtin_expect ((m & tm) == tm, 1))
+        *d = s;
+      else
+        {
+          gtm_cacheline_mask bm = gtm_bit_to_byte_mask[m & 15];
+          gtm_word n, o = *d;
+
+          __asm("\n0:\t"
+                "mov %[o], %[n]\n\t"
+                "and %[m], %[n]\n\t"
+                "or %[s], %[n]\n\t"
+                "cmpxchg %[n], %[d]\n\t"
+                "jnz,pn 0b"
+                : [d] "+m"(*d), [n] "=&r" (n), [o] "+a"(o)
+                : [s] "r" (s & bm), [m] "r" (~bm));
+        }
+    }
+}
+
+inline void ALWAYS_INLINE
+gtm_cacheline::store_mask (uint64_t *d, uint64_t s, uint8_t m)
+{
+  gtm_cacheline_mask tm = (1 << sizeof (s)) - 1;
+  if (__builtin_expect (m & tm, tm))
+    {
+      if (__builtin_expect ((m & tm) == tm, 1))
+        *d = s;
+      else
+        {
+#ifdef __x86_64__
+          uint32_t bl = gtm_bit_to_byte_mask[m & 15];
+          uint32_t bh = gtm_bit_to_byte_mask[(m >> 4) & 15];
+          gtm_cacheline_mask bm = bl | ((gtm_cacheline_mask)bh << 31 << 1);
+          uint64_t n, o = *d;
+          __asm("\n0:\t"
+                "mov %[o], %[n]\n\t"
+                "and %[m], %[n]\n\t"
+                "or %[s], %[n]\n\t"
+                "cmpxchg %[n], %[d]\n\t"
+                "jnz,pn 0b"
+                : [d] "+m"(*d), [n] "=&r" (n), [o] "+a"(o)
+                : [s] "r" (s & bm), [m] "r" (~bm));
+#else
+          /* ??? While it's possible to perform this operation with
+             cmpxchg8b, the sequence requires all 7 general registers
+             and thus cannot be performed with -fPIC.  Don't even try.  */
+          uint32_t *d32 = reinterpret_cast<uint32_t *>(d);
+          store_mask (d32, s, m);
+          store_mask (d32 + 1, s >> 32, m >> 4);
+#endif
+        }
+    }
+}
+
+#ifdef __SSE2__
+inline void ALWAYS_INLINE
+gtm_cacheline::store_mask (__m128i *d, __m128i s, uint16_t m)
+{
+  if (__builtin_expect (m == 0, 0))
+    return;
+  if (__builtin_expect (m == 0xffff, 1))
+    *d = s;
+  else
+    {
+      __m128i bm0, bm1, bm2, bm3;
+      bm0 = _mm_set_epi32 (0, 0, 0, gtm_bit_to_byte_mask[m & 15]); m >>= 4;
+      bm1 = _mm_set_epi32 (0, 0, 0, gtm_bit_to_byte_mask[m & 15]); m >>= 4;
+      bm2 = _mm_set_epi32 (0, 0, 0, gtm_bit_to_byte_mask[m & 15]); m >>= 4;
+      bm3 = _mm_set_epi32 (0, 0, 0, gtm_bit_to_byte_mask[m & 15]); m >>= 4;
+      bm0 = _mm_unpacklo_epi32 (bm0, bm1);
+      bm2 = _mm_unpacklo_epi32 (bm2, bm3);
+      bm0 = _mm_unpacklo_epi64 (bm0, bm2);
+
+      _mm_maskmoveu_si128 (s, bm0, (char *)d);
+    }
+}
+#endif // SSE2
+
+} // namespace GTM
+
+#endif // LIBITM_CACHELINE_H
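In portable terms the retry loop above computes the following. Note that the __sync builtin shown would emit a LOCK cmpxchg, which is precisely the prefix the asm avoids, so this is a semantic reference only (helper name invented; assumes the GTM namespace is in scope):

  static void store_mask_reference (uint32_t *d, uint32_t s, uint8_t m)
  {
    uint32_t bm = gtm_bit_to_byte_mask[m & 15];
    uint32_t o = *d;
    for (;;)
      {
        uint32_t n = (o & ~bm) | (s & bm);  // splice selected bytes
        uint32_t prev = __sync_val_compare_and_swap (d, o, n);
        if (prev == o)
          break;      // *d still held o, so n was written intact
        o = prev;     // another writer changed *d; retry from its value
      }
  }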
diff --git a/libitm/config/x86/sjlj.S b/libitm/config/x86/sjlj.S
new file mode 100644
index 00000000000..725ffec8057
--- /dev/null
+++ b/libitm/config/x86/sjlj.S
@@ -0,0 +1,105 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+   Contributed by Richard Henderson <rth@redhat.com>.
+
+   This file is part of the GNU Transactional Memory Library (libitm).
+
+   Libitm is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+	.text
+	.p2align 4
+	.globl	_ITM_beginTransaction
+	.type	_ITM_beginTransaction, @function
+
+_ITM_beginTransaction:
+	.cfi_startproc
+#ifdef __x86_64__
+	leaq	8(%rsp), %rax
+	movq	(%rsp), %r8
+	subq	$72, %rsp
+	.cfi_def_cfa_offset 80
+	movq	%rax, (%rsp)
+	movq	%r8, 8(%rsp)
+	movq	%rbx, 16(%rsp)
+	movq	%rbp, 24(%rsp)
+	movq	%r12, 32(%rsp)
+	movq	%r13, 40(%rsp)
+	movq	%r14, 48(%rsp)
+	movq	%r15, 56(%rsp)
+	movq	%rsp, %rsi
+	call	GTM_begin_transaction
+	addq	$72, %rsp
+	.cfi_def_cfa_offset 8
+	ret
+#else
+	leal	4(%esp), %ecx
+	subl	$28, %esp
+	.cfi_def_cfa_offset 32
+	movl	%ecx, 8(%esp)
+	movl	%ebx, 12(%esp)
+	movl	%esi, 16(%esp)
+	movl	%edi, 20(%esp)
+	movl	%ebp, 24(%esp)
+	leal	8(%esp), %edx
+	call	GTM_begin_transaction
+	addl	$28, %esp
+	.cfi_def_cfa_offset 4
+	ret
+#endif
+	.cfi_endproc
+	.size	_ITM_beginTransaction, .-_ITM_beginTransaction
+
+	.p2align 4
+	.globl	GTM_longjmp
+	.type	GTM_longjmp, @function
+	.hidden	GTM_longjmp
+
+GTM_longjmp:
+	.cfi_startproc
+#ifdef __x86_64__
+	movq	(%rdi), %rcx
+	movq	8(%rdi), %rdx
+	movq	16(%rdi), %rbx
+	movq	24(%rdi), %rbp
+	movq	32(%rdi), %r12
+	movq	40(%rdi), %r13
+	movq	48(%rdi), %r14
+	movq	56(%rdi), %r15
+	movl	%esi, %eax
+	.cfi_def_cfa %rcx, 0
+	.cfi_register %rip, %rdx
+	movq	%rcx, %rsp
+	jmp	*%rdx
+#else
+	xchgl	%eax, %edx
+	movl	(%edx), %ecx
+	movl	4(%edx), %ebx
+	movl	8(%edx), %esi
+	movl	12(%edx), %edi
+	movl	16(%edx), %ebp
+	movl	20(%edx), %edx
+	.cfi_def_cfa %ecx, 0
+	.cfi_register %eip, %edx
+	movl	%ecx, %esp
+	jmp	*%edx
+#endif
+	.cfi_endproc
+	.size	GTM_longjmp, .-GTM_longjmp
+
+.section .note.GNU-stack, "", @progbits
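These two entry points behave like a transactional setjmp/longjmp: _ITM_beginTransaction saves the call-saved registers into a gtm_jmpbuf built on its own stack frame, and GTM_longjmp rewinds to that frame on restart or abort. User code never calls them directly; roughly, GCC expands __transaction_atomic into something like the sketch below (constant and function names are from libitm's ABI header, libitm.h; the dispatch shown is deliberately simplified):

  // Illustrative only: _ITM_beginTransaction can "return twice", like setjmp.
  #include <cstdint>
  #include "libitm.h"

  extern uint32_t x;

  void toy_transaction (void)
  {
    uint32_t a = _ITM_beginTransaction (pr_instrumentedCode);
    if (a & a_runInstrumentedCode)
      {
        _ITM_WU4 (&x, 42);           // instrumented store via the TM runtime
        _ITM_commitTransaction ();
      }
  }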
diff --git a/libitm/config/x86/target.h b/libitm/config/x86/target.h
new file mode 100644
index 00000000000..197faebe2d4
--- /dev/null
+++ b/libitm/config/x86/target.h
@@ -0,0 +1,98 @@
+/* Copyright (C) 2008, 2009, 2011 Free Software Foundation, Inc.
+   Contributed by Richard Henderson <rth@redhat.com>.
+
+   This file is part of the GNU Transactional Memory Library (libitm).
+
+   Libitm is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+namespace GTM HIDDEN {
+
+#ifdef __x86_64__
+/* ??? This doesn't work for Win64.  */
+typedef struct gtm_jmpbuf
+{
+  void *cfa;
+  unsigned long rip;
+  unsigned long rbx;
+  unsigned long rbp;
+  unsigned long r12;
+  unsigned long r13;
+  unsigned long r14;
+  unsigned long r15;
+} gtm_jmpbuf;
+#else
+typedef struct gtm_jmpbuf
+{
+  void *cfa;
+  unsigned long ebx;
+  unsigned long esi;
+  unsigned long edi;
+  unsigned long ebp;
+  unsigned long eip;
+} gtm_jmpbuf;
+#endif
+
+/* x86 doesn't require strict alignment for the basic types.  */
+#define STRICT_ALIGNMENT 0
+
+/* x86 uses a fixed page size of 4K.  */
+#define PAGE_SIZE	4096
+#define FIXED_PAGE_SIZE	1
+
+/* The size of one line in hardware caches (in bytes).  */
+#define HW_CACHELINE_SIZE 64
+
+
+static inline void
+cpu_relax (void)
+{
+  __asm volatile ("rep; nop" : : : "memory");
+}
+
+static inline void
+atomic_read_barrier (void)
+{
+  /* x86 is a strong memory ordering machine.  */
+  __asm volatile ("" : : : "memory");
+}
+
+static inline void
+atomic_write_barrier (void)
+{
+  /* x86 is a strong memory ordering machine.  */
+  __asm volatile ("" : : : "memory");
+}
+
+} // namespace GTM
+
+// We'll be using some of the cpu builtins, and their associated types.
+#ifndef __cplusplus
+/* ??? It's broken for C++.  */
+#include <x86intrin.h>
+#else
+# ifdef __SSE2__
+#  include <emmintrin.h>
+# elif defined(__SSE__)
+#  include <xmmintrin.h>
+# endif
+# ifdef __AVX__
+#  include <immintrin.h>
+# endif
+#endif
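cpu_relax wraps "rep; nop", the legacy encoding of the pause instruction, which eases pressure on a hyperthread sibling inside spin loops. A minimal sketch of the intended use (the lock flag and helper are invented for illustration; assumes compilation inside the GTM namespace):

  // Test-and-test-and-set spin acquire.
  static inline void spin_acquire (volatile int *lock)
  {
    while (__sync_lock_test_and_set (lock, 1))  // atomic exchange
      while (*lock)
        cpu_relax ();                           // spin politely until free
  }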
diff --git a/libitm/config/x86/unaligned.h b/libitm/config/x86/unaligned.h
new file mode 100644
index 00000000000..01abc47dccb
--- /dev/null
+++ b/libitm/config/x86/unaligned.h
@@ -0,0 +1,237 @@
+/* Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+   Contributed by Richard Henderson <rth@redhat.com>.
+
+   This file is part of the GNU Transactional Memory Library (libitm).
+
+   Libitm is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef LIBITM_X86_UNALIGNED_H
+#define LIBITM_X86_UNALIGNED_H 1
+
+#define HAVE_ARCH_UNALIGNED_LOAD2_U4 1
+#define HAVE_ARCH_UNALIGNED_LOAD2_U8 1
+
+#include "config/generic/unaligned.h"
+
+namespace GTM HIDDEN {
+
+template<>
+inline uint32_t
+unaligned_load2<uint32_t>(const gtm_cacheline *c1,
+                          const gtm_cacheline *c2, size_t ofs)
+{
+  uint32_t r, lo, hi;
+  lo = c1->u32[CACHELINE_SIZE / sizeof(uint32_t) - 1];
+  hi = c2->u32[0];
+  asm("shrd %b2, %1, %0" : "=r"(r) : "r"(hi), "c"((ofs & 3) * 8), "0"(lo));
+  return r;
+}
+
+template<>
+inline uint64_t
+unaligned_load2<uint64_t>(const gtm_cacheline *c1,
+                          const gtm_cacheline *c2, size_t ofs)
+{
+#ifdef __x86_64__
+  uint64_t r, lo, hi;
+  lo = c1->u64[CACHELINE_SIZE / sizeof(uint64_t) - 1];
+  hi = c2->u64[0];
+  asm("shrd %b2, %1, %0" : "=r"(r) : "r"(hi), "c"((ofs & 7) * 8), "0"(lo));
+  return r;
+#else
+  uint32_t v0, v1, v2;
+  uint64_t r;
+
+  if (ofs < CACHELINE_SIZE - 4)
+    {
+      v0 = c1->u32[CACHELINE_SIZE / sizeof(uint32_t) - 2];
+      v1 = c1->u32[CACHELINE_SIZE / sizeof(uint32_t) - 1];
+      v2 = c2->u32[0];
+    }
+  else
+    {
+      v0 = c1->u32[CACHELINE_SIZE / sizeof(uint32_t) - 1];
+      v1 = c2->u32[0];
+      v2 = c2->u32[1];
+    }
+  ofs = (ofs & 3) * 8;
+  asm("shrd %%cl, %[v1], %[v0]; shrd %%cl, %[v2], %[v1]"
+      : "=A"(r) : "c"(ofs), [v0] "a"(v0), [v1] "d"(v1), [v2] "r"(v2));
+
+  return r;
+#endif
+}
+
+#if defined(__SSE2__) || defined(__MMX__)
+template<>
+inline _ITM_TYPE_M64
+unaligned_load2<_ITM_TYPE_M64>(const gtm_cacheline *c1,
+                               const gtm_cacheline *c2, size_t ofs)
+{
+# ifdef __x86_64__
+  __m128i lo = _mm_movpi64_epi64 (c1->m64[CACHELINE_SIZE / 8 - 1]);
+  __m128i hi = _mm_movpi64_epi64 (c2->m64[0]);
+
+  ofs = (ofs & 7) * 8;
+  lo = _mm_srli_epi64 (lo, ofs);
+  hi = _mm_slli_epi64 (hi, 64 - ofs);
+  lo = lo | hi;
+  return _mm_movepi64_pi64 (lo);
+# else
+  // On 32-bit we're about to return the result in an MMX register, so go
+  // ahead and do the computation in that unit, even if SSE2 is available.
+  __m64 lo = c1->m64[CACHELINE_SIZE / 8 - 1];
+  __m64 hi = c2->m64[0];
+
+  ofs = (ofs & 7) * 8;
+  lo = _mm_srli_si64 (lo, ofs);
+  hi = _mm_slli_si64 (hi, 64 - ofs);
+  return lo | hi;
+# endif
+}
+#endif // SSE2 or MMX
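What the shrd asm computes can be restated portably. The explicit zero-shift case is one reason the double-shift instruction is convenient here: `hi << 32` is undefined in C++, while shrd handles a zero count natively (reference helper, not part of the patch):

  static uint32_t reference_load2_u32 (uint32_t lo, uint32_t hi, size_t ofs)
  {
    unsigned shift = (ofs & 3) * 8;
    if (shift == 0)
      return lo;                                // shrd with count 0 is a no-op
    return (lo >> shift) | (hi << (32 - shift));
  }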
+
+// The SSE types are strictly aligned.
+#ifdef __SSE__
+template<>
+  struct strict_alignment<_ITM_TYPE_M128>
+    : public std::true_type
+  { };
+
+// Expand the unaligned SSE move instructions.
+template<>
+inline _ITM_TYPE_M128
+unaligned_load<_ITM_TYPE_M128>(const void *t)
+{
+  return _mm_loadu_ps (static_cast<const float *>(t));
+}
+
+template<>
+inline void
+unaligned_store<_ITM_TYPE_M128>(void *t, _ITM_TYPE_M128 val)
+{
+  _mm_storeu_ps (static_cast<float *>(t), val);
+}
+#endif // SSE
+
+#ifdef __AVX__
+// The AVX types are strictly aligned when it comes to vmovaps vs vmovups.
+template<>
+  struct strict_alignment<_ITM_TYPE_M256>
+    : public std::true_type
+  { };
+
+template<>
+inline _ITM_TYPE_M256
+unaligned_load<_ITM_TYPE_M256>(const void *t)
+{
+  return _mm256_loadu_ps (static_cast<const float *>(t));
+}
+
+template<>
+inline void
+unaligned_store<_ITM_TYPE_M256>(void *t, _ITM_TYPE_M256 val)
+{
+  _mm256_storeu_ps (static_cast<float *>(t), val);
+}
+#endif // AVX
+
+#ifdef __XOP__
+# define HAVE_ARCH_REALIGN_M128I 1
+extern const __v16qi GTM_vpperm_shift[16];
+inline __m128i
+realign_m128i (__m128i lo, __m128i hi, unsigned byte_count)
+{
+  return _mm_perm_epi8 (lo, hi, GTM_vpperm_shift[byte_count]);
+}
+#elif defined(__AVX__)
+# define HAVE_ARCH_REALIGN_M128I 1
+extern "C" const uint64_t GTM_vpalignr_table[16];
+inline __m128i
+realign_m128i (__m128i lo, __m128i hi, unsigned byte_count)
+{
+  register __m128i xmm0 __asm__("xmm0") = hi;
+  register __m128i xmm1 __asm__("xmm1") = lo;
+  __asm("call *%2" : "+x"(xmm0) : "x"(xmm1),
+        "r"(&GTM_vpalignr_table[byte_count]));
+  return xmm0;
+}
+#elif defined(__SSSE3__)
+# define HAVE_ARCH_REALIGN_M128I 1
+extern "C" const uint64_t GTM_palignr_table[16];
+inline __m128i
+realign_m128i (__m128i lo, __m128i hi, unsigned byte_count)
+{
+  register __m128i xmm0 __asm__("xmm0") = hi;
+  register __m128i xmm1 __asm__("xmm1") = lo;
+  __asm("call *%2" : "+x"(xmm0) : "x"(xmm1),
+        "r"(&GTM_palignr_table[byte_count]));
+  return xmm0;
+}
+#elif defined(__SSE2__)
+# define HAVE_ARCH_REALIGN_M128I 1
+extern "C" const char GTM_pshift_table[16 * 16];
+inline __m128i
+realign_m128i (__m128i lo, __m128i hi, unsigned byte_count)
+{
+  register __m128i xmm0 __asm__("xmm0") = lo;
+  register __m128i xmm1 __asm__("xmm1") = hi;
+  __asm("call *%2" : "+x"(xmm0), "+x"(xmm1)
+        : "r"(GTM_pshift_table + byte_count * 16));
+  return xmm0;
+}
+#endif // XOP, AVX, SSSE3, SSE2
+
+#ifdef HAVE_ARCH_REALIGN_M128I
+template<>
+inline _ITM_TYPE_M128
+unaligned_load2<_ITM_TYPE_M128>(const gtm_cacheline *c1,
+                                const gtm_cacheline *c2, size_t ofs)
+{
+  return (_ITM_TYPE_M128)
+    realign_m128i (c1->m128i[CACHELINE_SIZE / 16 - 1],
+                   c2->m128i[0], ofs & 15);
+}
+#endif // HAVE_ARCH_REALIGN_M128I
+
+#ifdef __AVX__
+template<>
+inline _ITM_TYPE_M256
+unaligned_load2<_ITM_TYPE_M256>(const gtm_cacheline *c1,
+                                const gtm_cacheline *c2, size_t ofs)
+{
+  __m128i v0, v1;
+  __m256i r;
+
+  v0 = (__m128i) unaligned_load2<_ITM_TYPE_M128>(c1, c2, ofs);
+  if (ofs < CACHELINE_SIZE - 16)
+    v1 = v0, v0 = _mm_loadu_si128 ((const __m128i *) &c1->b[ofs]);
+  else
+    v1 = _mm_loadu_si128 ((const __m128i *) &c2->b[ofs + 16 - CACHELINE_SIZE]);
+
+  r = _mm256_castsi128_si256 ((__m128i) v0);
+  r = _mm256_insertf128_si256 (r, (__m128i) v1, 1);
+  return (_ITM_TYPE_M256) r;
+}
+#endif // AVX
+
+} // namespace GTM
+
+#endif // LIBITM_X86_UNALIGNED_H
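All four realign_m128i variants implement the same byte-level operation: result byte i is byte (byte_count + i) of the 32-byte concatenation lo:hi. A reference helper spelling that out with plain arrays (illustrative, not part of the patch):

  static void reference_realign (const uint8_t lo[16], const uint8_t hi[16],
                                 unsigned byte_count, uint8_t out[16])
  {
    uint8_t cat[32];
    for (int i = 0; i < 16; ++i)
      {
        cat[i] = lo[i];                 // low half of the concatenation
        cat[16 + i] = hi[i];            // high half
      }
    for (int i = 0; i < 16; ++i)
      out[i] = cat[byte_count + i];     // shift right by byte_count bytes
  }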
diff --git a/libitm/config/x86/x86_avx.cc b/libitm/config/x86/x86_avx.cc
new file mode 100644
index 00000000000..9d1ddfb3ee8
--- /dev/null
+++ b/libitm/config/x86/x86_avx.cc
@@ -0,0 +1,95 @@
+/* Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+   Contributed by Richard Henderson <rth@redhat.com>.
+
+   This file is part of the GNU Transactional Memory Library (libitm).
+
+   Libitm is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "libitm_i.h"
+#include "dispatch.h"
+
+// ??? Use memcpy for now, until we have figured out how to best instantiate
+// these loads/stores.
+CREATE_DISPATCH_FUNCTIONS_T_MEMCPY(M256, GTM::abi_disp()->, )
+
+void ITM_REGPARM
+_ITM_LM256 (const _ITM_TYPE_M256 *ptr)
+{
+  GTM::GTM_LB (ptr, sizeof (*ptr));
+}
+
+// Helpers for re-aligning two 128-bit values.
+#ifdef __XOP__
+const __v16qi GTM::GTM_vpperm_shift[16] =
+{
+  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+  { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 },
+  { 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 },
+  { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18 },
+  { 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 },
+  { 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20 },
+  { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 },
+  { 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22 },
+  { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 },
+  { 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
+  { 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 },
+  { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 },
+  { 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 },
+  { 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28 },
+  { 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29 },
+  { 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
+};
+#else
+# define INSN0		"movdqa %xmm1, %xmm0"
+# define INSN(N)	"vpalignr $" #N ", %xmm1, %xmm0, %xmm0"
+# define TABLE_ENT_0	INSN0 "\n\tret\n\t"
+# define TABLE_ENT(N)	".balign 8\n\t" INSN(N) "\n\tret\n\t"
+
+asm(".pushsection .text\n\
+	.balign 16\n\
+	.globl	GTM_vpalignr_table\n\
+	.hidden	GTM_vpalignr_table\n\
+	.type	GTM_vpalignr_table, @function\n\
+GTM_vpalignr_table:\n\t"
+	TABLE_ENT_0
+	TABLE_ENT(1)
+	TABLE_ENT(2)
+	TABLE_ENT(3)
+	TABLE_ENT(4)
+	TABLE_ENT(5)
+	TABLE_ENT(6)
+	TABLE_ENT(7)
+	TABLE_ENT(8)
+	TABLE_ENT(9)
+	TABLE_ENT(10)
+	TABLE_ENT(11)
+	TABLE_ENT(12)
+	TABLE_ENT(13)
+	TABLE_ENT(14)
+	TABLE_ENT(15)
+	".balign 8\n\
+	.size GTM_vpalignr_table, .-GTM_vpalignr_table\n\
+	.popsection");
+
+# undef INSN0
+# undef INSN
+# undef TABLE_ENT_0
+# undef TABLE_ENT
+#endif
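The XOP rows follow a simple rule: row k is the byte sequence {k, ..., k+15}, i.e. "take 16 bytes starting at offset k of src1:src2", which is exactly the realign operation. A sketch of a generator for the table (illustrative only; the patch spells the rows out because they must be link-time constants):

  static void make_vpperm_rows (signed char rows[16][16])
  {
    for (int k = 0; k < 16; ++k)
      for (int i = 0; i < 16; ++i)
        rows[k][i] = (signed char) (k + i);  // select byte k+i of src1:src2
  }

In the non-XOP branch, each table entry is an 8-byte-aligned "vpalignr $k; ret" stub, so realign_m128i in unaligned.h can reach shift k with an indirect call to &GTM_vpalignr_table[k].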
diff --git a/libitm/config/x86/x86_sse.cc b/libitm/config/x86/x86_sse.cc
new file mode 100644
index 00000000000..7440c949cb7
--- /dev/null
+++ b/libitm/config/x86/x86_sse.cc
@@ -0,0 +1,122 @@
+/* Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+   Contributed by Richard Henderson <rth@redhat.com>.
+
+   This file is part of the GNU Transactional Memory Library (libitm).
+
+   Libitm is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
+   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+   more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "libitm_i.h"
+#include "dispatch.h"
+
+// ??? Use memcpy for now, until we have figured out how to best instantiate
+// these loads/stores.
+CREATE_DISPATCH_FUNCTIONS_T_MEMCPY(M64, GTM::abi_disp()->, )
+CREATE_DISPATCH_FUNCTIONS_T_MEMCPY(M128, GTM::abi_disp()->, )
+
+void ITM_REGPARM
+_ITM_LM64 (const _ITM_TYPE_M64 *ptr)
+{
+  GTM::GTM_LB (ptr, sizeof (*ptr));
+}
+
+void ITM_REGPARM
+_ITM_LM128 (const _ITM_TYPE_M128 *ptr)
+{
+  GTM::GTM_LB (ptr, sizeof (*ptr));
+}
+
+// Helpers for re-aligning two 128-bit values.
+#ifdef __SSSE3__
+# define INSN0		"movdqa %xmm1, %xmm0"
+# define INSN(N)	"palignr $" #N ", %xmm1, %xmm0"
+# define TABLE_ENT_0	INSN0 "\n\tret\n\t"
+# define TABLE_ENT(N)	".balign 8\n\t" INSN(N) "\n\tret\n\t"
+
+asm(".pushsection .text\n\
+	.balign 16\n\
+	.globl	GTM_palignr_table\n\
+	.hidden	GTM_palignr_table\n\
+	.type	GTM_palignr_table, @function\n\
+GTM_palignr_table:\n\t"
+	TABLE_ENT_0
+	TABLE_ENT(1)
+	TABLE_ENT(2)
+	TABLE_ENT(3)
+	TABLE_ENT(4)
+	TABLE_ENT(5)
+	TABLE_ENT(6)
+	TABLE_ENT(7)
+	TABLE_ENT(8)
+	TABLE_ENT(9)
+	TABLE_ENT(10)
+	TABLE_ENT(11)
+	TABLE_ENT(12)
+	TABLE_ENT(13)
+	TABLE_ENT(14)
+	TABLE_ENT(15)
+	".balign 8\n\
+	.size GTM_palignr_table, .-GTM_palignr_table\n\
+	.popsection");
+
+# undef INSN0
+# undef INSN
+# undef TABLE_ENT_0
+# undef TABLE_ENT
+#elif defined(__SSE2__)
+# define INSNS_8	"psrldq $8, %xmm0\n\t" \
+			"punpcklqdq %xmm1, %xmm0"
+# define INSNS(N)	"psrldq $"#N", %xmm0\n\t" \
+			"pslldq $(16-"#N"), %xmm1\n\t" \
+			"por %xmm1, %xmm0"
+# define TABLE_ENT_0	"ret\n\t"
+# define TABLE_ENT_8	".balign 16\n\t" INSNS_8 "\n\tret\n\t"
+# define TABLE_ENT(N)	".balign 16\n\t" INSNS(N) "\n\tret\n\t"
+
+asm(".pushsection .text\n\
+	.balign 16\n\
+	.globl	GTM_pshift_table\n\
+	.hidden	GTM_pshift_table\n\
+	.type	GTM_pshift_table, @function\n\
+GTM_pshift_table:\n\t"
+	TABLE_ENT_0
+	TABLE_ENT(1)
+	TABLE_ENT(2)
+	TABLE_ENT(3)
+	TABLE_ENT(4)
+	TABLE_ENT(5)
+	TABLE_ENT(6)
+	TABLE_ENT(7)
+	TABLE_ENT_8
+	TABLE_ENT(9)
+	TABLE_ENT(10)
+	TABLE_ENT(11)
+	TABLE_ENT(12)
+	TABLE_ENT(13)
+	TABLE_ENT(14)
+	TABLE_ENT(15)
+	".balign 8\n\
+	.size GTM_pshift_table, .-GTM_pshift_table\n\
+	.popsection");
+
+# undef INSNS_8
+# undef INSNS
+# undef TABLE_ENT_0
+# undef TABLE_ENT_8
+# undef TABLE_ENT
+#endif
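A quick way to convince yourself the shift tables are right: for every byte count, realigning two adjacent aligned vectors must match an unaligned 16-byte load at that offset. A hypothetical self-check (assumes a build where HAVE_ARCH_REALIGN_M128I is set and the GTM headers above are in scope):

  #include <cassert>
  #include <cstring>

  static void check_realign (void)
  {
    unsigned char buf[32];
    for (int i = 0; i < 32; ++i)
      buf[i] = (unsigned char) i;

    __m128i lo = _mm_loadu_si128 ((const __m128i *) buf);
    __m128i hi = _mm_loadu_si128 ((const __m128i *) (buf + 16));

    for (unsigned n = 0; n < 16; ++n)
      {
        __m128i r = GTM::realign_m128i (lo, hi, n);
        assert (memcmp (&r, buf + n, 16) == 0);  // equals unaligned load
      }
  }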