Diffstat (limited to 'gcc/config/xtensa/lib1funcs.asm')
-rw-r--r--  gcc/config/xtensa/lib1funcs.asm | 224
1 file changed, 219 insertions(+), 5 deletions(-)
diff --git a/gcc/config/xtensa/lib1funcs.asm b/gcc/config/xtensa/lib1funcs.asm
index 6392fa2a87f..69d51a7551c 100644
--- a/gcc/config/xtensa/lib1funcs.asm
+++ b/gcc/config/xtensa/lib1funcs.asm
@@ -99,7 +99,10 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
__mulsi3:
leaf_entry sp, 16
-#if XCHAL_HAVE_MUL16
+#if XCHAL_HAVE_MUL32
+ mull a2, a2, a3
+
+#elif XCHAL_HAVE_MUL16
or a4, a2, a3
srai a4, a4, 16
bnez a4, .LMUL16
@@ -124,7 +127,7 @@ __mulsi3:
slli a5, a5, 16
add a2, a4, a5
-#else /* !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MAC16 */
+#else /* !MUL32 && !MUL16 && !MAC16 */
/* Multiply one bit at a time, but unroll the loop 4x to better
exploit the addx instructions and avoid overhead.
@@ -188,7 +191,7 @@ __mulsi3:
neg a3, a2
movltz a2, a3, a5
-#endif /* !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MAC16 */
+#endif /* !MUL32 && !MUL16 && !MAC16 */
leaf_return
.size __mulsi3,.-__mulsi3
@@ -196,6 +199,170 @@ __mulsi3:
#endif /* L_mulsi3 */
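
The new MUL32 case reduces __mulsi3 to a single mull, which yields the low 32
bits of the product; the MUL16 path builds the same value from 16-bit halves.
A rough C model of the identity the half-word path relies on (names are
illustrative, not part of the patch):

    /* Low 32 bits of x*y from 16-bit half products.  The xh*yh term
       only affects bits 32 and up, so it is never computed.  */
    unsigned int
    mulsi3_ref (unsigned int x, unsigned int y)
    {
      unsigned int xl = x & 0xffff, xh = x >> 16;
      unsigned int yl = y & 0xffff, yh = y >> 16;
      return xl * yl + ((xl * yh + xh * yl) << 16);
    }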
+#ifdef L_umulsidi3
+ .align 4
+ .global __umulsidi3
+ .type __umulsidi3,@function
+__umulsidi3:
+ leaf_entry sp, 32
+#if __XTENSA_CALL0_ABI__
+ addi sp, sp, -32
+ s32i a12, sp, 16
+ s32i a13, sp, 20
+ s32i a14, sp, 24
+ s32i a15, sp, 28
+#endif
+
+#ifdef __XTENSA_EB__
+#define wh a2
+#define wl a3
+#else
+#define wh a3
+#define wl a2
+#endif /* __XTENSA_EB__ */
+
+ /* This code is taken from the mulsf3 routine in ieee754-sf.S.
+ See more comments there. */
+
+#if XCHAL_HAVE_MUL32_HIGH
+ mull a6, a2, a3
+ muluh wh, a2, a3
+ mov wl, a6
+
+#else /* ! MUL32_HIGH */
+
+#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
+ /* a0 and a8 will be clobbered by calling the multiply function
+ but a8 is not used here and need not be saved. */
+ s32i a0, sp, 0
+#endif
+
+#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
+
+#define a2h a4
+#define a3h a5
+
+ /* Get the high halves of the inputs into registers. */
+ srli a2h, a2, 16
+ srli a3h, a3, 16
+
+#define a2l a2
+#define a3l a3
+
+#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
+ /* Clear the high halves of the inputs. This does not matter
+ for MUL16 because the high bits are ignored. */
+ extui a2, a2, 0, 16
+ extui a3, a3, 0, 16
+#endif
+#endif /* MUL16 || MUL32 */
+
+
+#if XCHAL_HAVE_MUL16
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ mul16u dst, xreg ## xhalf, yreg ## yhalf
+
+#elif XCHAL_HAVE_MUL32
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ mull dst, xreg ## xhalf, yreg ## yhalf
+
+#elif XCHAL_HAVE_MAC16
+
+/* The preprocessor insists on inserting a space when concatenating after
+ a period in the definition of do_mul below. These macros are a workaround
+ using underscores instead of periods when doing the concatenation. */
+#define umul_aa_ll umul.aa.ll
+#define umul_aa_lh umul.aa.lh
+#define umul_aa_hl umul.aa.hl
+#define umul_aa_hh umul.aa.hh
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ umul_aa_ ## xhalf ## yhalf xreg, yreg; \
+ rsr dst, ACCLO
+
+#else /* no multiply hardware */
+
+#define set_arg_l(dst, src) \
+ extui dst, src, 0, 16
+#define set_arg_h(dst, src) \
+ srli dst, src, 16
+
+#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
+ set_arg_ ## xhalf (a13, xreg); \
+ set_arg_ ## yhalf (a14, yreg); \
+ call0 .Lmul_mulsi3; \
+ mov dst, a12
+#endif
+
+ /* Add pp1 and pp2 into a6 with carry-out in a9. */
+ do_mul(a6, a2, l, a3, h) /* pp 1 */
+ do_mul(a11, a2, h, a3, l) /* pp 2 */
+ movi a9, 0
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a9, a9, 1
+1:
+ /* Shift the high half of a9/a6 into position in a9. Note that
+ this value can be safely incremented without any carry-outs. */
+ ssai 16
+ src a9, a9, a6
+
+ /* Compute the low word into a6. */
+ do_mul(a11, a2, l, a3, l) /* pp 0 */
+ sll a6, a6
+ add a6, a6, a11
+ bgeu a6, a11, 1f
+ addi a9, a9, 1
+1:
+ /* Compute the high word into wh. */
+ do_mul(wh, a2, h, a3, h) /* pp 3 */
+ add wh, wh, a9
+ mov wl, a6
+
+#endif /* !MUL32_HIGH */
+
+ leaf_return
+
+#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
+
+ /* For Xtensa processors with no multiply hardware, this simplified
+ version of __mulsi3 multiplies 16-bit chunks of the operands
+ (it is adapted from the mantissa multiply in ieee754-sf.S).
+ It uses a custom ABI: the inputs are passed in a13 and a14, the
+ result is returned in a12, and a8 and a15 are clobbered. */
+ .align 4
+.Lmul_mulsi3:
+ movi a12, 0
+.Lmul_mult_loop:
+ add a15, a14, a12
+ extui a8, a13, 0, 1
+ movnez a12, a15, a8
+
+ do_addx2 a15, a14, a12, a15
+ extui a8, a13, 1, 1
+ movnez a12, a15, a8
+
+ do_addx4 a15, a14, a12, a15
+ extui a8, a13, 2, 1
+ movnez a12, a15, a8
+
+ do_addx8 a15, a14, a12, a15
+ extui a8, a13, 3, 1
+ movnez a12, a15, a8
+
+ srli a13, a13, 4
+ slli a14, a14, 4
+ bnez a13, .Lmul_mult_loop
+ ret
+#endif /* !MUL16 && !MUL32 && !MAC16 */
+
+ .size __umulsidi3,.-__umulsidi3
+
+#endif /* L_umulsidi3 */
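
When MUL32_HIGH is unavailable, __umulsidi3 above assembles the 64-bit product
from four 16-bit partial products.  A C sketch of the accumulation, including
the carry bookkeeping the assembly keeps in a9 (illustrative names; a model of
the algorithm, not the shipped code):

    #include <stdint.h>

    uint64_t
    umulsidi3_ref (uint32_t x, uint32_t y)
    {
      uint32_t xl = x & 0xffff, xh = x >> 16;
      uint32_t yl = y & 0xffff, yh = y >> 16;

      uint32_t pp0 = xl * yl;                 /* do_mul(a11, a2,l, a3,l) */
      uint32_t pp1 = xl * yh;                 /* do_mul(a6,  a2,l, a3,h) */
      uint32_t pp2 = xh * yl;                 /* do_mul(a11, a2,h, a3,l) */
      uint32_t pp3 = xh * yh;                 /* do_mul(wh,  a2,h, a3,h) */

      uint32_t mid = pp1 + pp2;               /* add, carry-out into a9 */
      uint32_t hi = (uint32_t)(mid < pp2) << 16 | mid >> 16;  /* ssai/src */

      /* pp1 + pp2 < 2^33, so hi < 2^17 here; the increments below cannot
         carry out of it, as the comment in the assembly notes.  */
      uint32_t lo = (mid << 16) + pp0;        /* sll + add */
      hi += lo < pp0;                         /* carry from the low word */
      hi += pp3;                              /* pp 3 */
      return (uint64_t) hi << 32 | lo;
    }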
+
+
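The .Lmul_mulsi3 helper above retires four multiplier bits per iteration, one
conditional add per addx step.  In C, with ordinary arguments standing in for
the custom a13/a14 -> a12 register ABI (a sketch, not the shipped code):

    /* Shift-and-add multiply, unrolled 4x to mirror the
       add/do_addx2/do_addx4/do_addx8 chain.  */
    unsigned int
    mul_helper_ref (unsigned int x, unsigned int y)
    {
      unsigned int acc = 0;
      while (x != 0)
        {
          if (x & 1) acc += y;         /* add   */
          if (x & 2) acc += y << 1;    /* addx2 */
          if (x & 4) acc += y << 2;    /* addx4 */
          if (x & 8) acc += y << 3;    /* addx8 */
          x >>= 4;                     /* srli a13, a13, 4 */
          y <<= 4;                     /* slli a14, a14, 4 */
        }
      return acc;
    }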
/* Define a macro for the NSAU (unsigned normalize shift amount)
instruction, which computes the number of leading zero bits,
to handle cases where it is not included in the Xtensa processor
@@ -225,7 +392,7 @@ __mulsi3:
#endif /* !XCHAL_HAVE_NSA */
.endm
-#ifdef L_nsau
+#ifdef L_clz
.section .rodata
.align 4
.global __nsau_data
@@ -251,7 +418,54 @@ __nsau_data:
#endif /* !XCHAL_HAVE_NSA */
.size __nsau_data,.-__nsau_data
.hidden __nsau_data
-#endif /* L_nsau */
+#endif /* L_clz */
+
+
+#ifdef L_clzsi2
+ .align 4
+ .global __clzsi2
+ .type __clzsi2,@function
+__clzsi2:
+ leaf_entry sp, 16
+ do_nsau a2, a2, a3, a4
+ leaf_return
+ .size __clzsi2,.-__clzsi2
+
+#endif /* L_clzsi2 */
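
__clzsi2 is a thin wrapper around do_nsau: NSAU ("normalize shift amount,
unsigned") returns the leading-zero count and is defined to return 32 for a
zero input.  A C model of that contract (illustrative only; the real macro
uses the NSAU instruction or a table-driven fallback):

    int
    clzsi2_ref (unsigned int x)
    {
      int n = 32;              /* nsau(0) == 32 */
      while (x != 0)
        {
          n--;
          x >>= 1;
        }
      return n;
    }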
+
+
+#ifdef L_ctzsi2
+ .align 4
+ .global __ctzsi2
+ .type __ctzsi2,@function
+__ctzsi2:
+ leaf_entry sp, 16
+ neg a3, a2
+ and a3, a3, a2
+ do_nsau a2, a3, a4, a5
+ neg a2, a2
+ addi a2, a2, 31
+ leaf_return
+ .size __ctzsi2,.-__ctzsi2
+
+#endif /* L_ctzsi2 */
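
The neg/and pair isolates the lowest set bit (x & -x), so the trailing-zero
count falls out as 31 minus the leading-zero count of that isolated bit.  A
sketch in terms of clzsi2_ref above (like the assembly, it returns -1 for a
zero input, a case the interface leaves undefined):

    int
    ctzsi2_ref (unsigned int x)
    {
      unsigned int lsb = x & -x;      /* neg + and */
      return 31 - clzsi2_ref (lsb);   /* neg + addi a2, a2, 31 */
    }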
+
+
+#ifdef L_ffssi2
+ .align 4
+ .global __ffssi2
+ .type __ffssi2,@function
+__ffssi2:
+ leaf_entry sp, 16
+ neg a3, a2
+ and a3, a3, a2
+ do_nsau a2, a3, a4, a5
+ neg a2, a2
+ addi a2, a2, 32
+ leaf_return
+ .size __ffssi2,.-__ffssi2
+
+#endif /* L_ffssi2 */
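
__ffssi2 is the same bit-isolation trick with an offset of 32 rather than 31,
which both converts to a 1-based bit index and makes the zero case come out
right: nsau(0) is 32, so the result is 0.  Again in terms of clzsi2_ref above
(a sketch):

    int
    ffssi2_ref (unsigned int x)
    {
      return 32 - clzsi2_ref (x & -x);   /* ffs(0) == 0 */
    }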
#ifdef L_udivsi3