path: root/libc/sysdeps/powerpc/powerpc32
author     gcc <gcc@7b3dc134-2b1b-0410-93df-9e9f96275f8d>  2006-08-17 01:18:26 +0000
committer  gcc <gcc@7b3dc134-2b1b-0410-93df-9e9f96275f8d>  2006-08-17 01:18:26 +0000
commit     15f34685e7a9b5caf761af2ebf6afa20438d440b (patch)
tree       dc04ce3cdf040f198743c15b64557824de174680 /libc/sysdeps/powerpc/powerpc32
parent     1e848e0e775a36f6359161f5deb890942ef42ff3 (diff)
download   eglibc2-15f34685e7a9b5caf761af2ebf6afa20438d440b.tar.gz
Import glibc-mainline for 2006-08-16
git-svn-id: svn://svn.eglibc.org/fsf/trunk@4 7b3dc134-2b1b-0410-93df-9e9f96275f8d
Diffstat (limited to 'libc/sysdeps/powerpc/powerpc32')
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/Implies | 1
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/Makefile | 44
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/Versions | 34
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/__longjmp-common.S | 63
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/__longjmp.S | 39
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/add_n.S | 77
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/addmul_1.S | 56
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/backtrace.c | 67
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/bits/atomic.h | 118
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/bits/wordsize.h | 8
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/bp-asm.h | 114
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/bsd-_setjmp.S | 59
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/bsd-setjmp.S | 41
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/configure | 62
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/configure.in | 32
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/dl-dtprocnum.h | 3
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/dl-machine.c | 620
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/dl-machine.h | 402
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/dl-start.S | 110
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/dl-trampoline.S | 186
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/elf/bzero.S | 37
-rwxr-xr-x  libc/sysdeps/powerpc/powerpc32/elf/configure | 52
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/elf/configure.in | 38
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/elf/start.S | 101
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/Makefile | 3
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S | 164
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/__longjmp.S | 42
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/fprrest.S | 95
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/fprsave.S | 112
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_ceil.S | 83
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S | 75
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_copysign.S | 60
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_copysignf.S | 1
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_copysignl.S | 50
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_fabs.S | 5
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_fabsl.S | 36
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_fdim.c | 5
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_floor.S | 83
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_floorf.S | 75
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_fmax.S | 5
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_fmin.S | 5
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_isnan.c | 7
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_llrint.c | 35
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_llrintf.c | 27
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_lrint.S | 45
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_lround.S | 99
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_lroundf.S | 2
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_rint.S | 79
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_rintf.S | 68
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_round.S | 103
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_roundf.S | 95
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_trunc.S | 90
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/s_truncf.S | 82
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S | 184
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/setjmp.S | 45
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/gprrest0.S | 70
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/gprrest1.S | 64
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/gprsave0.S | 88
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/gprsave1.S | 64
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/hp-timing.h | 82
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/libgcc-compat.S | 144
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/lshift.S | 133
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/memset.S | 342
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/mul_1.S | 53
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/ppc-mcount.S | 81
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/register-dump.h | 121
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/rshift.S | 63
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/setjmp-common.S | 74
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/setjmp.S | 44
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/stpcpy.S | 122
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/strchr.S | 131
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/strcmp.S | 127
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/strcpy.S | 121
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/strlen.S | 160
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/strncmp.S | 161
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/sub_n.S | 78
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/submul_1.S | 59
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/sysdep.h | 153
78 files changed, 6759 insertions, 0 deletions
diff --git a/libc/sysdeps/powerpc/powerpc32/Implies b/libc/sysdeps/powerpc/powerpc32/Implies
new file mode 100644
index 000000000..39a34c5f5
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/Implies
@@ -0,0 +1 @@
+wordsize-32
diff --git a/libc/sysdeps/powerpc/powerpc32/Makefile b/libc/sysdeps/powerpc/powerpc32/Makefile
new file mode 100644
index 000000000..1d58a063d
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/Makefile
@@ -0,0 +1,44 @@
+# Powerpc32 specific build options.
+
+ifeq ($(with-fp),no)
++cflags += -msoft-float
+sysdep-LDFLAGS += -msoft-float
+endif
+
+ifeq ($(subdir),misc)
+sysdep_routines += gprsave0 gprrest0 gprsave1 gprrest1
+endif
+
+# On PPC, -fpic works until the GOT contains 32768 bytes, and possibly
+# more depending on how clever the linker is. Each GOT entry takes 4 bytes,
+# so that's at least 8192 entries. Since libc only uses about 2000 entries,
+# we want to use -fpic, because this generates fewer relocs.
+ifeq (yes,$(build-shared))
+pic-ccflag = -fpic
+endif
+
+ifeq ($(subdir),csu)
+ifneq ($(elf),no)
+# The initfini generation code doesn't work in the presence of -fPIC, so
+# we use -fpic instead which is much better.
+CFLAGS-initfini.s += -fpic -O1
+
+# There is no benefit to using sdata for these objects, and the user
+# of the library should be able to control what goes into sdata.
+CFLAGS-init.o = -G0
+CFLAGS-gmon-start.o = -G0
+endif
+ifeq (yes,$(build-shared))
+# Compatibility
+ifeq (yes,$(have-protected))
+CPPFLAGS-libgcc-compat.S = -DHAVE_DOT_HIDDEN
+endif
+sysdep_routines += libgcc-compat
+shared-only-routines += libgcc-compat
+endif
+endif
+
+ifeq ($(subdir),elf)
+# extra shared linker files to link only into dl-allobjs.so
+sysdep-rtld-routines += dl-start
+endif
diff --git a/libc/sysdeps/powerpc/powerpc32/Versions b/libc/sysdeps/powerpc/powerpc32/Versions
new file mode 100644
index 000000000..3635c4a4a
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/Versions
@@ -0,0 +1,34 @@
+libc {
+ GLIBC_2.0 {
+ # Functions from libgcc.
+ __divdi3; __moddi3; __udivdi3; __umoddi3;
+ __cmpdi2; __ucmpdi2;
+ __ashldi3; __ashrdi3; __lshrdi3;
+ __fixdfdi; __fixunsdfdi;
+ __fixsfdi; __fixunssfdi;
+ __floatdidf; __floatdisf;
+ }
+}
+
+libm {
+ GLIBC_2.2 {
+ # Special functions to save and restore registers used by the
+ # runtime libraries.
+ _restgpr0_13; _restgpr0_14; _restgpr0_15; _restgpr0_16; _restgpr0_17;
+ _restgpr0_18; _restgpr0_19; _restgpr0_20; _restgpr0_21; _restgpr0_22;
+ _restgpr0_22; _restgpr0_23; _restgpr0_24; _restgpr0_25; _restgpr0_26;
+ _restgpr0_27; _restgpr0_28; _restgpr0_29; _restgpr0_30; _restgpr0_31;
+ _savegpr0_13; _savegpr0_14; _savegpr0_15; _savegpr0_16; _savegpr0_17;
+ _savegpr0_18; _savegpr0_19; _savegpr0_20; _savegpr0_21; _savegpr0_22;
+ _savegpr0_22; _savegpr0_23; _savegpr0_24; _savegpr0_25; _savegpr0_26;
+ _savegpr0_27; _savegpr0_28; _savegpr0_29; _savegpr0_30; _savegpr0_31;
+ _restgpr1_13; _restgpr1_14; _restgpr1_15; _restgpr1_16; _restgpr1_17;
+ _restgpr1_18; _restgpr1_19; _restgpr1_20; _restgpr1_21; _restgpr1_22;
+ _restgpr1_22; _restgpr1_23; _restgpr1_24; _restgpr1_25; _restgpr1_26;
+ _restgpr1_27; _restgpr1_28; _restgpr1_29; _restgpr1_30; _restgpr1_31;
+ _savegpr1_13; _savegpr1_14; _savegpr1_15; _savegpr1_16; _savegpr1_17;
+ _savegpr1_18; _savegpr1_19; _savegpr1_20; _savegpr1_21; _savegpr1_22;
+ _savegpr1_22; _savegpr1_23; _savegpr1_24; _savegpr1_25; _savegpr1_26;
+ _savegpr1_27; _savegpr1_28; _savegpr1_29; _savegpr1_30; _savegpr1_31;
+ }
+}
diff --git a/libc/sysdeps/powerpc/powerpc32/__longjmp-common.S b/libc/sysdeps/powerpc/powerpc32/__longjmp-common.S
new file mode 100644
index 000000000..411b6a20c
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/__longjmp-common.S
@@ -0,0 +1,63 @@
+/* longjmp for PowerPC.
+ Copyright (C) 1995-1997, 1999-2001, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#define _ASM
+#ifdef __NO_VMX__
+# include <novmxsetjmp.h>
+#else
+# include <jmpbuf-offsets.h>
+#endif
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+ENTRY (BP_SYM (__longjmp))
+ CHECK_BOUNDS_BOTH_WIDE_LIT (r3, r8, r9, JB_SIZE)
+
+ lwz r1,(JB_GPR1*4)(r3)
+ lwz r0,(JB_LR*4)(r3)
+ lwz r14,((JB_GPRS+0)*4)(r3)
+ lwz r15,((JB_GPRS+1)*4)(r3)
+ lwz r16,((JB_GPRS+2)*4)(r3)
+ lwz r17,((JB_GPRS+3)*4)(r3)
+ lwz r18,((JB_GPRS+4)*4)(r3)
+ lwz r19,((JB_GPRS+5)*4)(r3)
+ lwz r20,((JB_GPRS+6)*4)(r3)
+#ifdef PTR_DEMANGLE
+ PTR_DEMANGLE (r0, r25)
+ PTR_DEMANGLE2 (r1, r25)
+#endif
+ mtlr r0
+ lwz r21,((JB_GPRS+7)*4)(r3)
+ lwz r22,((JB_GPRS+8)*4)(r3)
+ lwz r0,(JB_CR*4)(r3)
+ lwz r23,((JB_GPRS+9)*4)(r3)
+ lwz r24,((JB_GPRS+10)*4)(r3)
+ lwz r25,((JB_GPRS+11)*4)(r3)
+ mtcrf 0xFF,r0
+ lwz r26,((JB_GPRS+12)*4)(r3)
+ lwz r27,((JB_GPRS+13)*4)(r3)
+ lwz r28,((JB_GPRS+14)*4)(r3)
+ lwz r29,((JB_GPRS+15)*4)(r3)
+ lwz r30,((JB_GPRS+16)*4)(r3)
+ lwz r31,((JB_GPRS+17)*4)(r3)
+ mr r3,r4
+ blr
+END (BP_SYM (__longjmp))
diff --git a/libc/sysdeps/powerpc/powerpc32/__longjmp.S b/libc/sysdeps/powerpc/powerpc32/__longjmp.S
new file mode 100644
index 000000000..5a050f1e7
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/__longjmp.S
@@ -0,0 +1,39 @@
+/* AltiVec/VMX (new) version of __longjmp for PowerPC.
+ Copyright (C) 1995-1997,1999,2000,2003,2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <libc-symbols.h>
+#include <shlib-compat.h>
+
+#if defined NOT_IN_libc
+/* Build a non-versioned object for rtld-*. */
+# include "__longjmp-common.S"
+
+#else /* !NOT_IN_libc */
+strong_alias (__vmx__longjmp, __longjmp);
+# define __longjmp __vmx__longjmp
+# include "__longjmp-common.S"
+
+# if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+# define __NO_VMX__
+# undef JB_SIZE
+# undef __longjmp
+# define __longjmp __novmx__longjmp
+# include "__longjmp-common.S"
+# endif
+#endif /* !NOT_IN_libc */
diff --git a/libc/sysdeps/powerpc/powerpc32/add_n.S b/libc/sysdeps/powerpc/powerpc32/add_n.S
new file mode 100644
index 000000000..89e1a30c1
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/add_n.S
@@ -0,0 +1,77 @@
+/* Add two limb vectors of equal, non-zero length for PowerPC.
+ Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* mp_limb_t mpn_add_n (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_srcptr s2_ptr,
+ mp_size_t size)
+ Calculate s1+s2 and put result in res_ptr; return carry, 0 or 1. */
+
+/* Note on optimisation: This code is optimal for the 601. Almost every other
+ possible 2-unrolled inner loop will not be. Also, watch out for the
+ alignment... */
+
+EALIGN (BP_SYM (__mpn_add_n), 3, 0)
+
+#if __BOUNDED_POINTERS__
+ slwi r10,r6,2 /* convert limbs to bytes */
+ CHECK_BOUNDS_BOTH_WIDE (r3, r8, r9, r10)
+ CHECK_BOUNDS_BOTH_WIDE (r4, r8, r9, r10)
+ CHECK_BOUNDS_BOTH_WIDE (r5, r8, r9, r10)
+#endif
+/* Set up for loop below. */
+ mtcrf 0x01,r6
+ srwi. r7,r6,1
+ li r10,0
+ mtctr r7
+ bt 31,L(2)
+
+/* Clear the carry. */
+ addic r0,r0,0
+/* Adjust pointers for loop. */
+ addi r3,r3,-4
+ addi r4,r4,-4
+ addi r5,r5,-4
+ b L(0)
+
+L(2): lwz r7,0(r5)
+ lwz r6,0(r4)
+ addc r6,r6,r7
+ stw r6,0(r3)
+ beq L(1)
+
+/* The loop. */
+
+/* Align start of loop to an odd word boundary to guarantee that the
+ last two words can be fetched in one access (for 601). */
+L(0): lwz r9,4(r4)
+ lwz r8,4(r5)
+ lwzu r6,8(r4)
+ lwzu r7,8(r5)
+ adde r8,r9,r8
+ stw r8,4(r3)
+ adde r6,r6,r7
+ stwu r6,8(r3)
+ bdnz L(0)
+/* Return the carry. */
+L(1): addze r3,r10
+ blr
+END (BP_SYM (__mpn_add_n))
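For reference, the operation that the unrolled loop above implements can be sketched in portable C; the limb type and helper name below are illustrative, not glibc internals.

/* Illustrative C equivalent of __mpn_add_n (a sketch, not the glibc source):
   res[i] = s1[i] + s2[i] + carry, propagating the carry limb by limb. */
#include <stdint.h>
#include <stddef.h>

typedef uint32_t limb_t;            /* one 32-bit limb on powerpc32 */

static limb_t
ref_add_n (limb_t *res, const limb_t *s1, const limb_t *s2, size_t size)
{
  limb_t carry = 0;
  for (size_t i = 0; i < size; i++)
    {
      limb_t a = s1[i], b = s2[i];
      limb_t sum = a + b;
      limb_t out = sum + carry;
      /* Carry out if either addition wrapped around.  */
      carry = (sum < a) | (out < sum);
      res[i] = out;
    }
  return carry;                      /* 0 or 1, like the addze at L(1) */
}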
diff --git a/libc/sysdeps/powerpc/powerpc32/addmul_1.S b/libc/sysdeps/powerpc/powerpc32/addmul_1.S
new file mode 100644
index 000000000..98fad2b8e
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/addmul_1.S
@@ -0,0 +1,56 @@
+/* Multiply a limb vector by a single limb, for PowerPC.
+ Copyright (C) 1993-1995, 1997, 1999, 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* mp_limb_t mpn_addmul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
+ mp_size_t s1_size, mp_limb_t s2_limb)
+ Calculate res+s1*s2 and put result back in res; return carry. */
+ENTRY (BP_SYM (__mpn_addmul_1))
+#if __BOUNDED_POINTERS__
+ slwi r10,r5,2 /* convert limbs to bytes */
+ CHECK_BOUNDS_BOTH_WIDE (r3, r8, r9, r10)
+ CHECK_BOUNDS_BOTH_WIDE (r4, r8, r9, r10)
+#endif
+ mtctr r5
+
+ lwz r0,0(r4)
+ mullw r7,r0,r6
+ mulhwu r10,r0,r6
+ lwz r9,0(r3)
+ addc r8,r7,r9
+ addi r3,r3,-4 /* adjust res_ptr */
+ bdz L(1)
+
+L(0): lwzu r0,4(r4)
+ stwu r8,4(r3)
+ mullw r8,r0,r6
+ adde r7,r8,r10
+ mulhwu r10,r0,r6
+ lwz r9,4(r3)
+ addze r10,r10
+ addc r8,r7,r9
+ bdnz L(0)
+
+L(1): stw r8,4(r3)
+ addze r3,r10
+ blr
+END (BP_SYM (__mpn_addmul_1))
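The multiply-accumulate loop can likewise be sketched in C; the 64-bit intermediate below stands in for the mullw/mulhwu pair in the assembly (names are illustrative, not glibc's).

/* Illustrative C equivalent of __mpn_addmul_1 (a sketch, not the glibc
   source): multiply each 32-bit limb of s1 by s2_limb, add it into res,
   and return the final carry limb. */
#include <stdint.h>
#include <stddef.h>

static uint32_t
ref_addmul_1 (uint32_t *res, const uint32_t *s1, size_t size, uint32_t s2_limb)
{
  uint32_t carry = 0;
  for (size_t i = 0; i < size; i++)
    {
      uint64_t t = (uint64_t) s1[i] * s2_limb + res[i] + carry;
      res[i] = (uint32_t) t;         /* low 32 bits go back into res */
      carry = (uint32_t) (t >> 32);  /* high 32 bits become the next carry */
    }
  return carry;
}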
diff --git a/libc/sysdeps/powerpc/powerpc32/backtrace.c b/libc/sysdeps/powerpc/powerpc32/backtrace.c
new file mode 100644
index 000000000..e7e12544c
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/backtrace.c
@@ -0,0 +1,67 @@
+/* Return backtrace of current program state.
+ Copyright (C) 1998, 2000, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <execinfo.h>
+#include <stddef.h>
+#include <bp-checks.h>
+
+/* This is the stack layout we see with every stack frame.
+ Note that every routine is required by the ABI to lay out the stack
+ like this.
+
+ +----------------+ +-----------------+
+ %r1 -> | %r1 last frame--------> | %r1 last frame--->... --> NULL
+ | | | |
+ | (unused) | | return address |
+ +----------------+ +-----------------+
+*/
+struct layout
+{
+ struct layout *__unbounded next;
+ void *__unbounded return_address;
+};
+
+int
+__backtrace (void **array, int size)
+{
+ struct layout *current;
+ int count;
+
+ /* Force gcc to spill LR. */
+ asm volatile ("" : "=l"(current));
+
+ /* Get the address on top-of-stack. */
+ asm volatile ("lwz %0,0(1)" : "=r"(current));
+ current = BOUNDED_1 (current);
+
+ for ( count = 0;
+ current != NULL && count < size;
+ current = BOUNDED_1 (current->next), count++)
+ array[count] = current->return_address;
+
+ /* It's possible the second-last stack frame can't return
+ (that is, it's __libc_start_main), in which case
+ the CRT startup code will have set its LR to 'NULL'. */
+ if (count > 0 && array[count-1] == NULL)
+ count--;
+
+ return count;
+}
+weak_alias (__backtrace, backtrace)
+libc_hidden_def (__backtrace)
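As a usage sketch of the function defined above, assuming only the declarations in <execinfo.h>: the frame depth and the output descriptor are arbitrary choices for illustration.

/* Minimal caller of backtrace()/backtrace_symbols_fd() from <execinfo.h>. */
#include <execinfo.h>
#include <unistd.h>

void
dump_stack (void)
{
  void *frames[16];
  int n = backtrace (frames, 16);            /* walks the %r1 back-chain */
  backtrace_symbols_fd (frames, n, STDERR_FILENO);
}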
diff --git a/libc/sysdeps/powerpc/powerpc32/bits/atomic.h b/libc/sysdeps/powerpc/powerpc32/bits/atomic.h
new file mode 100644
index 000000000..6fcc669fb
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/bits/atomic.h
@@ -0,0 +1,118 @@
+/* Atomic operations. PowerPC32 version.
+ Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/*
+ * The 32-bit exchange_bool is different on powerpc64 because the subf
+ * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
+ * (a load word and zero (high 32) form). So powerpc64 has a slightly
+ * different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
+ */
+# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+({ \
+ unsigned int __tmp; \
+ __asm __volatile ( \
+ "1: lwarx %0,0,%1\n" \
+ " subf. %0,%2,%0\n" \
+ " bne 2f\n" \
+ " stwcx. %3,0,%1\n" \
+ " bne- 1b\n" \
+ "2: " __ARCH_ACQ_INSTR \
+ : "=&r" (__tmp) \
+ : "b" (mem), "r" (oldval), "r" (newval) \
+ : "cr0", "memory"); \
+ __tmp != 0; \
+})
+
+# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
+({ \
+ unsigned int __tmp; \
+ __asm __volatile (__ARCH_REL_INSTR "\n" \
+ "1: lwarx %0,0,%1\n" \
+ " subf. %0,%2,%0\n" \
+ " bne 2f\n" \
+ " stwcx. %3,0,%1\n" \
+ " bne- 1b\n" \
+ "2: " \
+ : "=&r" (__tmp) \
+ : "b" (mem), "r" (oldval), "r" (newval) \
+ : "cr0", "memory"); \
+ __tmp != 0; \
+})
+
+/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
+ load and reserve (ldarx) and store conditional (stdcx.) instructions.
+ So for powerpc32 we stub out the 64-bit forms. */
+# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
+ (abort (), 0)
+
+# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+ (abort (), (__typeof (*mem)) 0)
+
+# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
+ (abort (), 0)
+
+# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
+ (abort (), (__typeof (*mem)) 0)
+
+# define __arch_atomic_exchange_64_acq(mem, value) \
+ ({ abort (); (*mem) = (value); })
+
+# define __arch_atomic_exchange_64_rel(mem, value) \
+ ({ abort (); (*mem) = (value); })
+
+# define __arch_atomic_exchange_and_add_64(mem, value) \
+ ({ abort (); (*mem) = (value); })
+
+# define __arch_atomic_increment_val_64(mem) \
+ ({ abort (); (*mem)++; })
+
+# define __arch_atomic_decrement_val_64(mem) \
+ ({ abort (); (*mem)--; })
+
+# define __arch_atomic_decrement_if_positive_64(mem) \
+ ({ abort (); (*mem)--; })
+
+#ifdef _ARCH_PWR4
+/*
+ * Newer powerpc64 processors support the new "light weight" sync (lwsync)
+ * So if the build is using -mcpu=[power4,power5,power5+,970] we can
+ * safely use lwsync.
+ */
+# define atomic_read_barrier() __asm ("lwsync" ::: "memory")
+/*
+ * "light weight" sync can also be used for the release barrier.
+ */
+# ifndef UP
+# define __ARCH_REL_INSTR "lwsync"
+# endif
+#else
+/*
+ * Older powerpc32 processors don't support the new "light weight"
+ * sync (lwsync). So the only safe option is to use normal sync
+ * for all powerpc32 applications.
+ */
+# define atomic_read_barrier() __asm ("sync" ::: "memory")
+#endif
+
+/*
+ * Include the rest of the atomic ops macros which are common to both
+ * powerpc32 and powerpc64.
+ */
+#include_next <bits/atomic.h>
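The acquire compare-and-exchange above can be approximated in portable C. This sketch uses the GCC/Clang __atomic builtins instead of inline lwarx/stwcx. assembly and only illustrates the macro's return convention (0 when oldval matched and the swap happened); the helper name is illustrative.

/* Illustrative semantics of __arch_compare_and_exchange_bool_32_acq
   (a sketch, not the glibc macro). */
#include <stdint.h>

static int
cas_bool_32_acq (volatile uint32_t *mem, uint32_t newval, uint32_t oldval)
{
  uint32_t expected = oldval;
  /* __atomic_compare_exchange_n returns true on success, so invert to
     match the macro's "0 means the store happened" convention. */
  return !__atomic_compare_exchange_n (mem, &expected, newval,
                                       0 /* strong */, __ATOMIC_ACQUIRE,
                                       __ATOMIC_RELAXED);
}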
diff --git a/libc/sysdeps/powerpc/powerpc32/bits/wordsize.h b/libc/sysdeps/powerpc/powerpc32/bits/wordsize.h
new file mode 100644
index 000000000..1a79c8636
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/bits/wordsize.h
@@ -0,0 +1,8 @@
+/* Determine the wordsize from the preprocessor defines. */
+
+#if defined __powerpc64__
+# define __WORDSIZE 64
+# define __WORDSIZE_COMPAT32 1
+#else
+# define __WORDSIZE 32
+#endif
diff --git a/libc/sysdeps/powerpc/powerpc32/bp-asm.h b/libc/sysdeps/powerpc/powerpc32/bp-asm.h
new file mode 100644
index 000000000..b3bbba757
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/bp-asm.h
@@ -0,0 +1,114 @@
+/* Bounded-pointer definitions for PowerPC assembler.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ Contributed by Greg McGary <greg@mcgary.org>
+ This file is part of the GNU C Library. Its master source is NOT part of
+ the C library, however. The master source lives in the GNU MP Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#if __BOUNDED_POINTERS__
+
+/* Byte offsets of BP components. */
+# define oVALUE 0
+# define oLOW 4
+# define oHIGH 8
+
+/* Don't check bounds, just convert the BP register to its simple
+ pointer value. */
+
+# define DISCARD_BOUNDS(rBP) \
+ lwz rBP, oVALUE(rBP)
+
+/* Check low bound, with the side effect that the BP register is converted
+ its simple pointer value. Move the high bound into a register for
+ later use. */
+
+# define CHECK_BOUNDS_LOW(rBP, rLOW, rHIGH) \
+ lwz rHIGH, oHIGH(rBP); \
+ lwz rLOW, oLOW(rBP); \
+ lwz rBP, oVALUE(rBP); \
+ twllt rBP, rLOW
+
+/* Check the high bound, which is in a register, using the given
+ conditional trap instruction. */
+
+# define CHECK_BOUNDS_HIGH(rVALUE, rHIGH, TWLcc) \
+ TWLcc rVALUE, rHIGH
+
+/* Check the high bound, which is stored at the return-value's high
+ bound slot, using the given conditional trap instruction. */
+
+# define CHECK_BOUNDS_HIGH_RTN(rVALUE, rHIGH, TWLcc) \
+ lwz rHIGH, oHIGH(rRTN); \
+ TWLcc rVALUE, rHIGH
+
+/* Check both bounds, with the side effect that the BP register is
+ converted to its simple pointer value. */
+
+# define CHECK_BOUNDS_BOTH(rBP, rLOW, rHIGH) \
+ CHECK_BOUNDS_LOW(rBP, rLOW, rHIGH); \
+ twlge rBP, rHIGH
+
+/* Check bounds on a memory region of given length, with the side
+ effect that the BP register is converted to its simple pointer
+ value. */
+
+# define CHECK_BOUNDS_BOTH_WIDE(rBP, rLOW, rHIGH, rLENGTH) \
+ CHECK_BOUNDS_LOW (rBP, rLOW, rHIGH); \
+ sub rHIGH, rHIGH, rLENGTH; \
+ twlgt rBP, rHIGH
+
+# define CHECK_BOUNDS_BOTH_WIDE_LIT(rBP, rLOW, rHIGH, LENGTH) \
+ CHECK_BOUNDS_LOW (rBP, rLOW, rHIGH); \
+ subi rHIGH, rHIGH, LENGTH; \
+ twlgt rBP, rHIGH
+
+/* Store a pointer value register into the return-value's pointer
+ value slot. */
+
+# define STORE_RETURN_VALUE(rVALUE) \
+ stw rVALUE, oVALUE(rRTN)
+
+/* Store a low and high bounds into the return-value's pointer bounds
+ slots. */
+
+# define STORE_RETURN_BOUNDS(rLOW, rHIGH) \
+ stw rLOW, oLOW(rRTN); \
+ stw rHIGH, oHIGH(rRTN)
+
+/* Stuff zero value/low/high into the BP addressed by rRTN. */
+
+# define RETURN_NULL_BOUNDED_POINTER \
+ li r4, 0; \
+ STORE_RETURN_VALUE (r4); \
+ STORE_RETURN_BOUNDS (r4, r4)
+
+#else
+
+# define DISCARD_BOUNDS(rBP)
+# define CHECK_BOUNDS_LOW(rBP, rLOW, rHIGH)
+# define CHECK_BOUNDS_HIGH(rVALUE, rHIGH, TWLcc)
+# define CHECK_BOUNDS_HIGH_RTN(rVALUE, rHIGH, TWLcc)
+# define CHECK_BOUNDS_BOTH(rBP, rLOW, rHIGH)
+# define CHECK_BOUNDS_BOTH_WIDE(rBP, rLOW, rHIGH, rLENGTH)
+# define CHECK_BOUNDS_BOTH_WIDE_LIT(rBP, rLOW, rHIGH, LENGTH)
+# define STORE_RETURN_VALUE(rVALUE)
+# define STORE_RETURN_BOUNDS(rLOW, rHIGH)
+
+# define RETURN_NULL_BOUNDED_POINTER li rRTN, 0
+
+#endif
+
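The oVALUE/oLOW/oHIGH offsets above imply a three-word pointer representation. This C sketch (not a glibc header; the struct and helper names are illustrative) shows that layout and the region test that CHECK_BOUNDS_BOTH_WIDE performs with its twllt/twlgt traps.

/* One bounded pointer as the macros above see it (32-bit target). */
#include <stdint.h>

struct bounded_ptr
{
  void     *value;   /* offset oVALUE = 0 */
  uintptr_t low;     /* offset oLOW   = 4 */
  uintptr_t high;    /* offset oHIGH  = 8 */
};

static int
bp_check_wide (const struct bounded_ptr *bp, uintptr_t length)
{
  uintptr_t v = (uintptr_t) bp->value;
  /* CHECK_BOUNDS_BOTH_WIDE traps on failure; here we just report it:
     the access of LENGTH bytes is valid when low <= value and
     value <= high - length. */
  return v >= bp->low && v <= bp->high - length;
}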
diff --git a/libc/sysdeps/powerpc/powerpc32/bsd-_setjmp.S b/libc/sysdeps/powerpc/powerpc32/bsd-_setjmp.S
new file mode 100644
index 000000000..4c28c2e54
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/bsd-_setjmp.S
@@ -0,0 +1,59 @@
+/* BSD `_setjmp' entry point to `sigsetjmp (..., 0)'. PowerPC32/64 version.
+ Copyright (C) 1994,1997,1999,2000,2002,2003,2004
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+#include <shlib-compat.h>
+#include <libc-symbols.h>
+#include <sysdep.h>
+#include <bp-sym.h>
+
+#if defined NOT_IN_libc
+/* Build a non-versioned object for rtld-*. */
+ENTRY (BP_SYM (_setjmp))
+ li r4,0 /* Set second argument to 0. */
+ b BP_SYM (__sigsetjmp@local)
+END (BP_SYM (_setjmp))
+libc_hidden_def (_setjmp)
+#else
+/* Build a versioned object for libc. */
+
+# if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+symbol_version (__novmx_setjmp,_setjmp,GLIBC_2.0);
+
+ENTRY (BP_SYM (__novmx_setjmp))
+ li r4,0 /* Set second argument to 0. */
+ b BP_SYM (__novmx__sigsetjmp@local)
+END (BP_SYM (__novmx_setjmp))
+libc_hidden_def (__novmx_setjmp)
+# endif /* defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4) */
+
+default_symbol_version (__vmx_setjmp,_setjmp,GLIBC_2.3.4)
+/* __GI__setjmp prototype is needed for nptl, i.e. _setjmp is defined
+ as a libc_hidden_proto & is used in sysdeps/generic/libc-start.c
+ if HAVE_CLEANUP_JMP_BUF is defined */
+ENTRY (BP_SYM (__GI__setjmp))
+ li r4,0 /* Set second argument to 0. */
+ b BP_SYM (__vmx__sigsetjmp@local)
+END (BP_SYM (__GI__setjmp))
+
+ENTRY (BP_SYM (__vmx_setjmp))
+ li r4,0 /* Set second argument to 0. */
+ b BP_SYM (__vmx__sigsetjmp@local)
+END (BP_SYM (__vmx_setjmp))
+libc_hidden_def (__vmx_setjmp)
+#endif /* !NOT_IN_libc */
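The symbol_version and default_symbol_version macros used above ultimately emit GNU as .symver directives, binding __novmx_setjmp to _setjmp@GLIBC_2.0 and __vmx_setjmp to _setjmp@@GLIBC_2.3.4. The standalone C sketch below shows the same pattern with hypothetical names and version nodes (a matching version script is still needed at link time); it is an illustration of the mechanism, not glibc's macro expansion.

/* Two implementations behind one public name, old binaries keep the
   compat binding, newly linked code gets the default one. */
int impl_old (void) { return 0; }   /* plays the role of __novmx_setjmp */
int impl_new (void) { return 1; }   /* plays the role of __vmx_setjmp   */

__asm__ (".symver impl_old, demo_entry@VERS_1.0");   /* compat binding  */
__asm__ (".symver impl_new, demo_entry@@VERS_2.0");  /* default binding */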
diff --git a/libc/sysdeps/powerpc/powerpc32/bsd-setjmp.S b/libc/sysdeps/powerpc/powerpc32/bsd-setjmp.S
new file mode 100644
index 000000000..01b195d83
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/bsd-setjmp.S
@@ -0,0 +1,41 @@
+/* BSD `setjmp' entry point to `sigsetjmp (..., 1)'. PowerPC32/64 version.
+ Copyright (C) 1994,1997,1999,2000,2003,2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+#include <shlib-compat.h>
+#include <libc-symbols.h>
+#include <sysdep.h>
+#include <bp-sym.h>
+
+#if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+
+ENTRY (__novmxsetjmp)
+ li r4,1 /* Set second argument to 1. */
+ b __novmx__sigsetjmp@local
+END (__novmxsetjmp)
+strong_alias (__novmxsetjmp, __novmx__setjmp)
+symbol_version (__novmxsetjmp, setjmp, GLIBC_2.0)
+
+#endif /* defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4) ) */
+
+ENTRY (__vmxsetjmp)
+ li r4,1 /* Set second argument to 1. */
+ b __vmx__sigsetjmp@local
+END (__vmxsetjmp)
+strong_alias (__vmxsetjmp, __vmx__setjmp)
+strong_alias (__vmx__setjmp, __setjmp)
+default_symbol_version (__vmxsetjmp,setjmp,GLIBC_2.3.4)
diff --git a/libc/sysdeps/powerpc/powerpc32/configure b/libc/sysdeps/powerpc/powerpc32/configure
new file mode 100644
index 000000000..0ff56c936
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/configure
@@ -0,0 +1,62 @@
+# This file is generated from configure.in by Autoconf. DO NOT EDIT!
+ # Local configure fragment for sysdeps/powerpc/powerpc32.
+
+# See whether gas has R_PPC_REL16 relocs.
+echo "$as_me:$LINENO: checking for R_PPC_REL16 gas support" >&5
+echo $ECHO_N "checking for R_PPC_REL16 gas support... $ECHO_C" >&6
+if test "${libc_cv_ppc_rel16+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat > conftest.s <<\EOF
+ .text
+ addis 11,30,_GLOBAL_OFFSET_TABLE_-.@ha
+EOF
+if { ac_try='${CC-cc} -c $CFLAGS conftest.s 1>&5'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ libc_cv_ppc_rel16=yes
+else
+ libc_cv_ppc_rel16=no
+fi
+rm -f conftest*
+fi
+echo "$as_me:$LINENO: result: $libc_cv_ppc_rel16" >&5
+echo "${ECHO_T}$libc_cv_ppc_rel16" >&6
+if test $libc_cv_ppc_rel16 = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_ASM_PPC_REL16 1
+_ACEOF
+
+fi
+
+# See whether GCC uses -msecure-plt.
+echo "$as_me:$LINENO: checking for -msecure-plt by default" >&5
+echo $ECHO_N "checking for -msecure-plt by default... $ECHO_C" >&6
+if test "${libc_cv_ppc_secure_plt+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ echo 'int foo (void) { extern int bar; return bar; }' > conftest.c
+libc_cv_ppc_secure_plt=no
+if { ac_try='${CC-cc} -S $CFLAGS conftest.c -fpic -o conftest.s 1>&5'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ if grep '_GLOBAL_OFFSET_TABLE_-.*@ha' conftest.s > /dev/null 2>&1; then
+ libc_cv_ppc_secure_plt=yes
+ fi
+fi
+rm -rf conftest*
+fi
+echo "$as_me:$LINENO: result: $libc_cv_ppc_secure_plt" >&5
+echo "${ECHO_T}$libc_cv_ppc_secure_plt" >&6
+if test $libc_cv_ppc_secure_plt = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_PPC_SECURE_PLT 1
+_ACEOF
+
+fi
diff --git a/libc/sysdeps/powerpc/powerpc32/configure.in b/libc/sysdeps/powerpc/powerpc32/configure.in
new file mode 100644
index 000000000..7219ad993
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/configure.in
@@ -0,0 +1,32 @@
+GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory.
+# Local configure fragment for sysdeps/powerpc/powerpc32.
+
+# See whether gas has R_PPC_REL16 relocs.
+AC_CACHE_CHECK(for R_PPC_REL16 gas support, libc_cv_ppc_rel16, [dnl
+cat > conftest.s <<\EOF
+ .text
+ addis 11,30,_GLOBAL_OFFSET_TABLE_-.@ha
+EOF
+if AC_TRY_COMMAND(${CC-cc} -c $CFLAGS conftest.s 1>&AS_MESSAGE_LOG_FD); then
+ libc_cv_ppc_rel16=yes
+else
+ libc_cv_ppc_rel16=no
+fi
+rm -f conftest*])
+if test $libc_cv_ppc_rel16 = yes; then
+ AC_DEFINE(HAVE_ASM_PPC_REL16)
+fi
+
+# See whether GCC uses -msecure-plt.
+AC_CACHE_CHECK(for -msecure-plt by default, libc_cv_ppc_secure_plt, [dnl
+echo 'int foo (void) { extern int bar; return bar; }' > conftest.c
+libc_cv_ppc_secure_plt=no
+if AC_TRY_COMMAND(${CC-cc} -S $CFLAGS conftest.c -fpic -o conftest.s 1>&AS_MESSAGE_LOG_FD); then
+ if grep '_GLOBAL_OFFSET_TABLE_-.*@ha' conftest.s > /dev/null 2>&1; then
+ libc_cv_ppc_secure_plt=yes
+ fi
+fi
+rm -rf conftest*])
+if test $libc_cv_ppc_secure_plt = yes; then
+ AC_DEFINE(HAVE_PPC_SECURE_PLT)
+fi
diff --git a/libc/sysdeps/powerpc/powerpc32/dl-dtprocnum.h b/libc/sysdeps/powerpc/powerpc32/dl-dtprocnum.h
new file mode 100644
index 000000000..7fe2be793
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/dl-dtprocnum.h
@@ -0,0 +1,3 @@
+/* Number of extra dynamic section entries for this architecture. By
+ default there are none. */
+#define DT_THISPROCNUM DT_PPC_NUM
diff --git a/libc/sysdeps/powerpc/powerpc32/dl-machine.c b/libc/sysdeps/powerpc/powerpc32/dl-machine.c
new file mode 100644
index 000000000..4120a0238
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/dl-machine.c
@@ -0,0 +1,620 @@
+/* Machine-dependent ELF dynamic relocation functions. PowerPC version.
+ Copyright (C) 1995-2003, 2004, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <unistd.h>
+#include <string.h>
+#include <sys/param.h>
+#include <link.h>
+#include <ldsodefs.h>
+#include <elf/dynamic-link.h>
+#include <dl-machine.h>
+#include <stdio-common/_itoa.h>
+
+/* The value __cache_line_size is defined in memset.S and is initialised
+ by _dl_sysdep_start via DL_PLATFORM_INIT. */
+extern int __cache_line_size;
+weak_extern (__cache_line_size)
+
+/* Because ld.so is now versioned, these functions can be in their own file;
+ no relocations need to be done to call them.
+ Of course, if ld.so is not versioned... */
+#if defined SHARED && !(DO_VERSIONING - 0)
+#error This will not work with versioning turned off, sorry.
+#endif
+
+
+/* Stuff for the PLT. */
+#define PLT_INITIAL_ENTRY_WORDS 18
+#define PLT_LONGBRANCH_ENTRY_WORDS 0
+#define PLT_TRAMPOLINE_ENTRY_WORDS 6
+#define PLT_DOUBLE_SIZE (1<<13)
+#define PLT_ENTRY_START_WORDS(entry_number) \
+ (PLT_INITIAL_ENTRY_WORDS + (entry_number)*2 \
+ + ((entry_number) > PLT_DOUBLE_SIZE \
+ ? ((entry_number) - PLT_DOUBLE_SIZE)*2 \
+ : 0))
+#define PLT_DATA_START_WORDS(num_entries) PLT_ENTRY_START_WORDS(num_entries)
+
+/* Macros to build PowerPC opcode words. */
+#define OPCODE_ADDI(rd,ra,simm) \
+ (0x38000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
+#define OPCODE_ADDIS(rd,ra,simm) \
+ (0x3c000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
+#define OPCODE_ADD(rd,ra,rb) \
+ (0x7c000214 | (rd) << 21 | (ra) << 16 | (rb) << 11)
+#define OPCODE_B(target) (0x48000000 | ((target) & 0x03fffffc))
+#define OPCODE_BA(target) (0x48000002 | ((target) & 0x03fffffc))
+#define OPCODE_BCTR() 0x4e800420
+#define OPCODE_LWZ(rd,d,ra) \
+ (0x80000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
+#define OPCODE_LWZU(rd,d,ra) \
+ (0x84000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
+#define OPCODE_MTCTR(rd) (0x7C0903A6 | (rd) << 21)
+#define OPCODE_RLWINM(ra,rs,sh,mb,me) \
+ (0x54000000 | (rs) << 21 | (ra) << 16 | (sh) << 11 | (mb) << 6 | (me) << 1)
+
+#define OPCODE_LI(rd,simm) OPCODE_ADDI(rd,0,simm)
+#define OPCODE_ADDIS_HI(rd,ra,value) \
+ OPCODE_ADDIS(rd,ra,((value) + 0x8000) >> 16)
+#define OPCODE_LIS_HI(rd,value) OPCODE_ADDIS_HI(rd,0,value)
+#define OPCODE_SLWI(ra,rs,sh) OPCODE_RLWINM(ra,rs,sh,0,31-sh)
+
+
+#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
+#define PPC_SYNC asm volatile ("sync" : : : "memory")
+#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
+#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
+#define PPC_DIE asm volatile ("tweq 0,0")
+
+/* Use this when you've modified some code, but it won't be in the
+ instruction fetch queue (or when it doesn't matter if it is). */
+#define MODIFIED_CODE_NOQUEUE(where) \
+ do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); } while (0)
+/* Use this when it might be in the instruction queue. */
+#define MODIFIED_CODE(where) \
+ do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); PPC_ISYNC; } while (0)
+
+
+/* The idea here is that to conform to the ABI, we are supposed to try
+ to load dynamic objects between 0x10000 (we actually use 0x40000 as
+ the lower bound, to increase the chance of a memory reference from
+ a null pointer giving a segfault) and the program's load address;
+ this may allow us to use a branch instruction in the PLT rather
+ than a computed jump. The address is only used as a preference for
+ mmap, so if we get it wrong the worst that happens is that it gets
+ mapped somewhere else. */
+
+ElfW(Addr)
+__elf_preferred_address (struct link_map *loader, size_t maplength,
+ ElfW(Addr) mapstartpref)
+{
+ ElfW(Addr) low, high;
+ struct link_map *l;
+ Lmid_t nsid;
+
+ /* If the object has a preference, load it there! */
+ if (mapstartpref != 0)
+ return mapstartpref;
+
+ /* Otherwise, quickly look for a suitable gap between 0x3FFFF and
+ 0x70000000. 0x3FFFF is so that references off NULL pointers will
+ cause a segfault, 0x70000000 is just paranoia (it should always
+     be superseded by the program's load address).  */
+ low = 0x0003FFFF;
+ high = 0x70000000;
+ for (nsid = 0; nsid < DL_NNS; ++nsid)
+ for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
+ {
+ ElfW(Addr) mapstart, mapend;
+ mapstart = l->l_map_start & ~(GLRO(dl_pagesize) - 1);
+ mapend = l->l_map_end | (GLRO(dl_pagesize) - 1);
+ assert (mapend > mapstart);
+
+ /* Prefer gaps below the main executable, note that l ==
+ _dl_loaded does not work for static binaries loading
+ e.g. libnss_*.so. */
+ if ((mapend >= high || l->l_type == lt_executable)
+ && high >= mapstart)
+ high = mapstart;
+ else if (mapend >= low && low >= mapstart)
+ low = mapend;
+ else if (high >= mapend && mapstart >= low)
+ {
+ if (high - mapend >= mapstart - low)
+ low = mapend;
+ else
+ high = mapstart;
+ }
+ }
+
+ high -= 0x10000; /* Allow some room between objects. */
+ maplength = (maplength | (GLRO(dl_pagesize) - 1)) + 1;
+ if (high <= low || high - low < maplength )
+ return 0;
+ return high - maplength; /* Both high and maplength are page-aligned. */
+}
+
+/* Set up the loaded object described by L so its unrelocated PLT
+ entries will jump to the on-demand fixup code in dl-runtime.c.
+ Also install a small trampoline to be used by entries that have
+ been relocated to an address too far away for a single branch. */
+
+/* There are many kinds of PLT entries:
+
+ (1) A direct jump to the actual routine, either a relative or
+ absolute branch. These are set up in __elf_machine_fixup_plt.
+
+ (2) Short lazy entries. These cover the first 8192 slots in
+ the PLT, and look like (where 'index' goes from 0 to 8191):
+
+ li %r11, index*4
+ b &plt[PLT_TRAMPOLINE_ENTRY_WORDS+1]
+
+ (3) Short indirect jumps. These replace (2) when a direct jump
+ wouldn't reach. They look the same except that the branch
+ is 'b &plt[PLT_LONGBRANCH_ENTRY_WORDS]'.
+
+ (4) Long lazy entries. These cover the slots when a short entry
+ won't fit ('index*4' overflows its field), and look like:
+
+ lis %r11, %hi(index*4 + &plt[PLT_DATA_START_WORDS])
+ lwzu %r12, %r11, %lo(index*4 + &plt[PLT_DATA_START_WORDS])
+ b &plt[PLT_TRAMPOLINE_ENTRY_WORDS]
+ bctr
+
+ (5) Long indirect jumps. These replace (4) when a direct jump
+ wouldn't reach. They look like:
+
+ lis %r11, %hi(index*4 + &plt[PLT_DATA_START_WORDS])
+ lwz %r12, %r11, %lo(index*4 + &plt[PLT_DATA_START_WORDS])
+ mtctr %r12
+ bctr
+
+ (6) Long direct jumps. These are used when thread-safety is not
+ required. They look like:
+
+ lis %r12, %hi(finaladdr)
+ addi %r12, %r12, %lo(finaladdr)
+ mtctr %r12
+ bctr
+
+
+ The lazy entries, (2) and (4), are set up here in
+ __elf_machine_runtime_setup. (1), (3), and (5) are set up in
+ __elf_machine_fixup_plt. (1), (3), and (6) can also be constructed
+ in __process_machine_rela.
+
+ The reason for the somewhat strange construction of the long
+ entries, (4) and (5), is that we need to ensure thread-safety. For
+ (1) and (3), this is obvious because only one instruction is
+ changed and the PPC architecture guarantees that aligned stores are
+ atomic. For (5), this is more tricky. When changing (4) to (5),
+   the `b' instruction is first changed to `mtctr'; this is safe
+ and is why the `lwzu' instruction is not just a simple `addi'.
+ Once this is done, and is visible to all processors, the `lwzu' can
+ safely be changed to a `lwz'. */
+int
+__elf_machine_runtime_setup (struct link_map *map, int lazy, int profile)
+{
+ if (map->l_info[DT_JMPREL])
+ {
+ Elf32_Word i;
+ Elf32_Word *plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
+ Elf32_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
+ / sizeof (Elf32_Rela));
+ Elf32_Word rel_offset_words = PLT_DATA_START_WORDS (num_plt_entries);
+ Elf32_Word data_words = (Elf32_Word) (plt + rel_offset_words);
+ Elf32_Word size_modified;
+
+ extern void _dl_runtime_resolve (void);
+ extern void _dl_prof_resolve (void);
+
+ /* Convert the index in r11 into an actual address, and get the
+ word at that address. */
+ plt[PLT_LONGBRANCH_ENTRY_WORDS] = OPCODE_ADDIS_HI (11, 11, data_words);
+ plt[PLT_LONGBRANCH_ENTRY_WORDS + 1] = OPCODE_LWZ (11, data_words, 11);
+
+ /* Call the procedure at that address. */
+ plt[PLT_LONGBRANCH_ENTRY_WORDS + 2] = OPCODE_MTCTR (11);
+ plt[PLT_LONGBRANCH_ENTRY_WORDS + 3] = OPCODE_BCTR ();
+
+ if (lazy)
+ {
+ Elf32_Word *tramp = plt + PLT_TRAMPOLINE_ENTRY_WORDS;
+ Elf32_Word dlrr = (Elf32_Word)(profile
+ ? _dl_prof_resolve
+ : _dl_runtime_resolve);
+ Elf32_Word offset;
+
+ if (profile && GLRO(dl_profile) != NULL
+ && _dl_name_match_p (GLRO(dl_profile), map))
+ /* This is the object we are looking for. Say that we really
+ want profiling and the timers are started. */
+ GL(dl_profile_map) = map;
+
+ /* For the long entries, subtract off data_words. */
+ tramp[0] = OPCODE_ADDIS_HI (11, 11, -data_words);
+ tramp[1] = OPCODE_ADDI (11, 11, -data_words);
+
+ /* Multiply index of entry by 3 (in r11). */
+ tramp[2] = OPCODE_SLWI (12, 11, 1);
+ tramp[3] = OPCODE_ADD (11, 12, 11);
+ if (dlrr <= 0x01fffffc || dlrr >= 0xfe000000)
+ {
+ /* Load address of link map in r12. */
+ tramp[4] = OPCODE_LI (12, (Elf32_Word) map);
+ tramp[5] = OPCODE_ADDIS_HI (12, 12, (Elf32_Word) map);
+
+ /* Call _dl_runtime_resolve. */
+ tramp[6] = OPCODE_BA (dlrr);
+ }
+ else
+ {
+ /* Get address of _dl_runtime_resolve in CTR. */
+ tramp[4] = OPCODE_LI (12, dlrr);
+ tramp[5] = OPCODE_ADDIS_HI (12, 12, dlrr);
+ tramp[6] = OPCODE_MTCTR (12);
+
+ /* Load address of link map in r12. */
+ tramp[7] = OPCODE_LI (12, (Elf32_Word) map);
+ tramp[8] = OPCODE_ADDIS_HI (12, 12, (Elf32_Word) map);
+
+ /* Call _dl_runtime_resolve. */
+ tramp[9] = OPCODE_BCTR ();
+ }
+
+ /* Set up the lazy PLT entries. */
+ offset = PLT_INITIAL_ENTRY_WORDS;
+ i = 0;
+ while (i < num_plt_entries && i < PLT_DOUBLE_SIZE)
+ {
+ plt[offset ] = OPCODE_LI (11, i * 4);
+ plt[offset+1] = OPCODE_B ((PLT_TRAMPOLINE_ENTRY_WORDS + 2
+ - (offset+1))
+ * 4);
+ i++;
+ offset += 2;
+ }
+ while (i < num_plt_entries)
+ {
+ plt[offset ] = OPCODE_LIS_HI (11, i * 4 + data_words);
+ plt[offset+1] = OPCODE_LWZU (12, i * 4 + data_words, 11);
+ plt[offset+2] = OPCODE_B ((PLT_TRAMPOLINE_ENTRY_WORDS
+ - (offset+2))
+ * 4);
+ plt[offset+3] = OPCODE_BCTR ();
+ i++;
+ offset += 4;
+ }
+ }
+
+ /* Now, we've modified code. We need to write the changes from
+ the data cache to a second-level unified cache, then make
+ sure that stale data in the instruction cache is removed.
+ (In a multiprocessor system, the effect is more complex.)
+ Most of the PLT shouldn't be in the instruction cache, but
+ there may be a little overlap at the start and the end.
+
+ Assumes that dcbst and icbi apply to lines of 16 bytes or
+ more. Current known line sizes are 16, 32, and 128 bytes.
+ The following gets the __cache_line_size, when available. */
+
+ /* Default minimum 4 words per cache line. */
+ int line_size_words = 4;
+
+ /* Don't try this until ld.so has relocated itself! */
+ int *line_size_ptr = &__cache_line_size;
+ if (lazy && line_size_ptr != NULL)
+ {
+ /* Verify that __cache_line_size is defined and set. */
+ if (*line_size_ptr != 0)
+ /* Convert bytes to words. */
+ line_size_words = *line_size_ptr / 4;
+ }
+
+ size_modified = lazy ? rel_offset_words : 6;
+ for (i = 0; i < size_modified; i += line_size_words)
+ PPC_DCBST (plt + i);
+ PPC_DCBST (plt + size_modified - 1);
+ PPC_SYNC;
+
+ for (i = 0; i < size_modified; i += line_size_words)
+ PPC_ICBI (plt + i);
+ PPC_ICBI (plt + size_modified - 1);
+ PPC_ISYNC;
+ }
+
+ return lazy;
+}
+
+Elf32_Addr
+__elf_machine_fixup_plt (struct link_map *map, const Elf32_Rela *reloc,
+ Elf32_Addr *reloc_addr, Elf32_Addr finaladdr)
+{
+ Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
+ if (delta << 6 >> 6 == delta)
+ *reloc_addr = OPCODE_B (delta);
+ else if (finaladdr <= 0x01fffffc || finaladdr >= 0xfe000000)
+ *reloc_addr = OPCODE_BA (finaladdr);
+ else
+ {
+ Elf32_Word *plt, *data_words;
+ Elf32_Word index, offset, num_plt_entries;
+
+ num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
+ / sizeof(Elf32_Rela));
+ plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
+ offset = reloc_addr - plt;
+ index = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
+ data_words = plt + PLT_DATA_START_WORDS (num_plt_entries);
+
+ reloc_addr += 1;
+
+ if (index < PLT_DOUBLE_SIZE)
+ {
+ data_words[index] = finaladdr;
+ PPC_SYNC;
+ *reloc_addr = OPCODE_B ((PLT_LONGBRANCH_ENTRY_WORDS - (offset+1))
+ * 4);
+ }
+ else
+ {
+ index -= (index - PLT_DOUBLE_SIZE)/2;
+
+ data_words[index] = finaladdr;
+ PPC_SYNC;
+
+ reloc_addr[1] = OPCODE_MTCTR (12);
+ MODIFIED_CODE_NOQUEUE (reloc_addr + 1);
+ PPC_SYNC;
+
+ reloc_addr[0] = OPCODE_LWZ (12,
+ (Elf32_Word) (data_words + index), 11);
+ }
+ }
+ MODIFIED_CODE (reloc_addr);
+ return finaladdr;
+}
+
+void
+_dl_reloc_overflow (struct link_map *map,
+ const char *name,
+ Elf32_Addr *const reloc_addr,
+ const Elf32_Sym *refsym)
+{
+ char buffer[128];
+ char *t;
+ t = stpcpy (buffer, name);
+ t = stpcpy (t, " relocation at 0x00000000");
+ _itoa_word ((unsigned) reloc_addr, t, 16, 0);
+ if (refsym)
+ {
+ const char *strtab;
+
+ strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
+ t = stpcpy (t, " for symbol `");
+ t = stpcpy (t, strtab + refsym->st_name);
+ t = stpcpy (t, "'");
+ }
+ t = stpcpy (t, " out of range");
+ _dl_signal_error (0, map->l_name, NULL, buffer);
+}
+
+void
+__process_machine_rela (struct link_map *map,
+ const Elf32_Rela *reloc,
+ struct link_map *sym_map,
+ const Elf32_Sym *sym,
+ const Elf32_Sym *refsym,
+ Elf32_Addr *const reloc_addr,
+ Elf32_Addr const finaladdr,
+ int rinfo)
+{
+ switch (rinfo)
+ {
+ case R_PPC_NONE:
+ return;
+
+ case R_PPC_ADDR32:
+ case R_PPC_GLOB_DAT:
+ case R_PPC_RELATIVE:
+ *reloc_addr = finaladdr;
+ return;
+
+ case R_PPC_UADDR32:
+ ((char *) reloc_addr)[0] = finaladdr >> 24;
+ ((char *) reloc_addr)[1] = finaladdr >> 16;
+ ((char *) reloc_addr)[2] = finaladdr >> 8;
+ ((char *) reloc_addr)[3] = finaladdr;
+ break;
+
+ case R_PPC_ADDR24:
+ if (__builtin_expect (finaladdr > 0x01fffffc && finaladdr < 0xfe000000, 0))
+ _dl_reloc_overflow (map, "R_PPC_ADDR24", reloc_addr, refsym);
+ *reloc_addr = (*reloc_addr & 0xfc000003) | (finaladdr & 0x3fffffc);
+ break;
+
+ case R_PPC_ADDR16:
+ if (__builtin_expect (finaladdr > 0x7fff && finaladdr < 0xffff8000, 0))
+ _dl_reloc_overflow (map, "R_PPC_ADDR16", reloc_addr, refsym);
+ *(Elf32_Half*) reloc_addr = finaladdr;
+ break;
+
+ case R_PPC_UADDR16:
+ if (__builtin_expect (finaladdr > 0x7fff && finaladdr < 0xffff8000, 0))
+ _dl_reloc_overflow (map, "R_PPC_UADDR16", reloc_addr, refsym);
+ ((char *) reloc_addr)[0] = finaladdr >> 8;
+ ((char *) reloc_addr)[1] = finaladdr;
+ break;
+
+ case R_PPC_ADDR16_LO:
+ *(Elf32_Half*) reloc_addr = finaladdr;
+ break;
+
+ case R_PPC_ADDR16_HI:
+ *(Elf32_Half*) reloc_addr = finaladdr >> 16;
+ break;
+
+ case R_PPC_ADDR16_HA:
+ *(Elf32_Half*) reloc_addr = (finaladdr + 0x8000) >> 16;
+ break;
+
+ case R_PPC_ADDR14:
+ case R_PPC_ADDR14_BRTAKEN:
+ case R_PPC_ADDR14_BRNTAKEN:
+ if (__builtin_expect (finaladdr > 0x7fff && finaladdr < 0xffff8000, 0))
+ _dl_reloc_overflow (map, "R_PPC_ADDR14", reloc_addr, refsym);
+ *reloc_addr = (*reloc_addr & 0xffff0003) | (finaladdr & 0xfffc);
+ if (rinfo != R_PPC_ADDR14)
+ *reloc_addr = ((*reloc_addr & 0xffdfffff)
+ | ((rinfo == R_PPC_ADDR14_BRTAKEN)
+ ^ (finaladdr >> 31)) << 21);
+ break;
+
+ case R_PPC_REL24:
+ {
+ Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
+ if (delta << 6 >> 6 != delta)
+ _dl_reloc_overflow (map, "R_PPC_REL24", reloc_addr, refsym);
+ *reloc_addr = (*reloc_addr & 0xfc000003) | (delta & 0x3fffffc);
+ }
+ break;
+
+ case R_PPC_COPY:
+ if (sym == NULL)
+ /* This can happen in trace mode when an object could not be
+ found. */
+ return;
+ if (sym->st_size > refsym->st_size
+ || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
+ {
+ const char *strtab;
+
+ strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
+ _dl_error_printf ("\
+%s: Symbol `%s' has different size in shared object, consider re-linking\n",
+ rtld_progname ?: "<program name unknown>",
+ strtab + refsym->st_name);
+ }
+ memcpy (reloc_addr, (char *) finaladdr, MIN (sym->st_size,
+ refsym->st_size));
+ return;
+
+ case R_PPC_REL32:
+ *reloc_addr = finaladdr - (Elf32_Word) reloc_addr;
+ return;
+
+ case R_PPC_JMP_SLOT:
+ /* It used to be that elf_machine_fixup_plt was used here,
+ but that doesn't work when ld.so relocates itself
+ for the second time. On the bright side, there's
+ no need to worry about thread-safety here. */
+ {
+ Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
+ if (delta << 6 >> 6 == delta)
+ *reloc_addr = OPCODE_B (delta);
+ else if (finaladdr <= 0x01fffffc || finaladdr >= 0xfe000000)
+ *reloc_addr = OPCODE_BA (finaladdr);
+ else
+ {
+ Elf32_Word *plt, *data_words;
+ Elf32_Word index, offset, num_plt_entries;
+
+ plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
+ offset = reloc_addr - plt;
+
+ if (offset < PLT_DOUBLE_SIZE*2 + PLT_INITIAL_ENTRY_WORDS)
+ {
+ index = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
+ num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
+ / sizeof(Elf32_Rela));
+ data_words = plt + PLT_DATA_START_WORDS (num_plt_entries);
+ data_words[index] = finaladdr;
+ reloc_addr[0] = OPCODE_LI (11, index * 4);
+ reloc_addr[1] = OPCODE_B ((PLT_LONGBRANCH_ENTRY_WORDS
+ - (offset+1))
+ * 4);
+ MODIFIED_CODE_NOQUEUE (reloc_addr + 1);
+ }
+ else
+ {
+ reloc_addr[0] = OPCODE_LIS_HI (12, finaladdr);
+ reloc_addr[1] = OPCODE_ADDI (12, 12, finaladdr);
+ reloc_addr[2] = OPCODE_MTCTR (12);
+ reloc_addr[3] = OPCODE_BCTR ();
+ MODIFIED_CODE_NOQUEUE (reloc_addr + 3);
+ }
+ }
+ }
+ break;
+
+#ifdef USE_TLS
+#define CHECK_STATIC_TLS(map, sym_map) \
+ do { \
+ if (__builtin_expect ((sym_map)->l_tls_offset == NO_TLS_OFFSET, 0)) \
+ _dl_allocate_static_tls (sym_map); \
+ } while (0)
+# define DO_TLS_RELOC(suffix) \
+ case R_PPC_DTPREL##suffix: \
+ /* During relocation all TLS symbols are defined and used. \
+ Therefore the offset is already correct. */ \
+ if (sym_map != NULL) \
+ do_reloc##suffix ("R_PPC_DTPREL"#suffix, \
+ TLS_DTPREL_VALUE (sym, reloc)); \
+ break; \
+ case R_PPC_TPREL##suffix: \
+ if (sym_map != NULL) \
+ { \
+ CHECK_STATIC_TLS (map, sym_map); \
+ do_reloc##suffix ("R_PPC_TPREL"#suffix, \
+ TLS_TPREL_VALUE (sym_map, sym, reloc)); \
+ } \
+ break;
+
+ inline void do_reloc16 (const char *r_name, Elf32_Addr value)
+ {
+ if (__builtin_expect (value > 0x7fff && value < 0xffff8000, 0))
+ _dl_reloc_overflow (map, r_name, reloc_addr, refsym);
+ *(Elf32_Half *) reloc_addr = value;
+ }
+ inline void do_reloc16_LO (const char *r_name, Elf32_Addr value)
+ {
+ *(Elf32_Half *) reloc_addr = value;
+ }
+ inline void do_reloc16_HI (const char *r_name, Elf32_Addr value)
+ {
+ *(Elf32_Half *) reloc_addr = value >> 16;
+ }
+ inline void do_reloc16_HA (const char *r_name, Elf32_Addr value)
+ {
+ *(Elf32_Half *) reloc_addr = (value + 0x8000) >> 16;
+ }
+ DO_TLS_RELOC (16)
+ DO_TLS_RELOC (16_LO)
+ DO_TLS_RELOC (16_HI)
+ DO_TLS_RELOC (16_HA)
+#endif
+
+ default:
+ _dl_reloc_bad_type (map, rinfo, 0);
+ return;
+ }
+
+ MODIFIED_CODE_NOQUEUE (reloc_addr);
+}
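The reach tests used by __elf_machine_fixup_plt and the R_PPC_JMP_SLOT case above can be restated in C. The helpers below are illustrative (not glibc code) and show the window a PowerPC `b'/`ba' instruction can cover: a signed 26-bit, word-aligned displacement, roughly ±32 MB.

/* Sketch of the branch-reach checks in the relocation code above. */
#include <stdint.h>

static int
rel_branch_reaches (uint32_t from, uint32_t to)
{
  int32_t delta = (int32_t) (to - from);
  /* Mirrors the `delta << 6 >> 6 == delta' idiom in the source:
     keep only deltas that survive sign-extension from 26 bits. */
  int32_t folded = (int32_t) ((uint32_t) delta << 6) >> 6;
  return folded == delta;
}

static int
abs_branch_reaches (uint32_t to)
{
  /* The same 26-bit window around address zero, expressed as the
     unsigned comparison the source uses before emitting OPCODE_BA. */
  return to <= 0x01fffffc || to >= 0xfe000000;
}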
diff --git a/libc/sysdeps/powerpc/powerpc32/dl-machine.h b/libc/sysdeps/powerpc/powerpc32/dl-machine.h
new file mode 100644
index 000000000..496fa71ec
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/dl-machine.h
@@ -0,0 +1,402 @@
+/* Machine-dependent ELF dynamic relocation inline functions. PowerPC version.
+ Copyright (C) 1995-2002, 2003, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef dl_machine_h
+#define dl_machine_h
+
+#define ELF_MACHINE_NAME "powerpc"
+
+#include <assert.h>
+#include <dl-tls.h>
+
+/* Translate a processor specific dynamic tag to the index
+ in l_info array. */
+#define DT_PPC(x) (DT_PPC_##x - DT_LOPROC + DT_NUM)
+
+/* Return nonzero iff ELF header is compatible with the running host. */
+static inline int
+elf_machine_matches_host (const Elf32_Ehdr *ehdr)
+{
+ return ehdr->e_machine == EM_PPC;
+}
+
+/* Return the value of the GOT pointer. */
+static inline Elf32_Addr * __attribute__ ((const))
+ppc_got (void)
+{
+ Elf32_Addr *got;
+#ifdef HAVE_ASM_PPC_REL16
+ asm ("bcl 20,31,1f\n"
+ "1: mflr %0\n"
+ " addis %0,%0,_GLOBAL_OFFSET_TABLE_-1b@ha\n"
+ " addi %0,%0,_GLOBAL_OFFSET_TABLE_-1b@l\n"
+ : "=b" (got) : : "lr");
+#else
+ asm (" bl _GLOBAL_OFFSET_TABLE_-4@local"
+ : "=l" (got));
+#endif
+ return got;
+}
+
+/* Return the link-time address of _DYNAMIC, stored as
+ the first value in the GOT. */
+static inline Elf32_Addr __attribute__ ((const))
+elf_machine_dynamic (void)
+{
+ return *ppc_got ();
+}
+
+/* Return the run-time load address of the shared object. */
+static inline Elf32_Addr __attribute__ ((const))
+elf_machine_load_address (void)
+{
+ Elf32_Addr *branchaddr;
+ Elf32_Addr runtime_dynamic;
+
+ /* This is much harder than you'd expect. Possibly I'm missing something.
+ The 'obvious' way:
+
+ Apparently, "bcl 20,31,$+4" is what should be used to load LR
+ with the address of the next instruction.
+ I think this is so that machines that do bl/blr pairing don't
+ get confused.
+
+ asm ("bcl 20,31,0f ;"
+ "0: mflr 0 ;"
+ "lis %0,0b@ha;"
+ "addi %0,%0,0b@l;"
+ "subf %0,%0,0"
+ : "=b" (addr) : : "r0", "lr");
+
+ doesn't work, because the linker doesn't have to (and in fact doesn't)
+ update the @ha and @l references; the loader (which runs after this
+ code) will do that.
+
+ Instead, we use the following trick:
+
+ The linker puts the _link-time_ address of _DYNAMIC at the first
+ word in the GOT. We could branch to that address, if we wanted,
+ by using an @local reloc; the linker works this out, so it's safe
+ to use now. We can't, of course, actually branch there, because
+ we'd cause an illegal instruction exception; so we need to compute
+ the address ourselves. That gives us the following code: */
+
+ /* Get address of the 'b _DYNAMIC@local'... */
+ asm ("bcl 20,31,0f;"
+ "b _DYNAMIC@local;"
+ "0:"
+ : "=l" (branchaddr));
+
+ /* So now work out the difference between where the branch actually points,
+ and the offset of that location in memory from the start of the file. */
+ runtime_dynamic = ((Elf32_Addr) branchaddr
+ + ((Elf32_Sword) (*branchaddr << 6 & 0xffffff00) >> 6));
+
+ return runtime_dynamic - elf_machine_dynamic ();
+}
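The expression above recovers the branch displacement from the instruction
word itself: a PowerPC "b" opcode carries a signed 26-bit byte displacement
(the 24-bit LI field scaled by 4) in the middle of the word, with the AA and
LK flags in the low two bits. A hypothetical helper, shown only to make the
shift-and-mask explicit (not part of glibc):

/* Decode the signed byte displacement of an unconditional "b" insn.  */
static inline Elf32_Sword
b_displacement (Elf32_Word insn)
{
  /* << 6 drops the 6-bit opcode, & 0xffffff00 clears AA/LK (which the
     shift moved into the low byte), and the arithmetic >> 6 sign-extends
     the displacement back into place.  */
  return (Elf32_Sword) (insn << 6 & 0xffffff00) >> 6;
}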
+
+#define ELF_MACHINE_BEFORE_RTLD_RELOC(dynamic_info) /* nothing */
+
+/* The PLT uses Elf32_Rela relocs. */
+#define elf_machine_relplt elf_machine_rela
+
+/* Mask identifying addresses reserved for the user program,
+ where the dynamic linker should not map anything. */
+#define ELF_MACHINE_USER_ADDRESS_MASK 0xf0000000UL
+
+/* The actual _start code is in dl-start.S. Use a really
+ ugly bit of assembler to let dl-start.o see _dl_start. */
+#define RTLD_START asm (".globl _dl_start");
+
+/* Decide where a relocatable object should be loaded. */
+extern ElfW(Addr)
+__elf_preferred_address(struct link_map *loader, size_t maplength,
+ ElfW(Addr) mapstartpref);
+#define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) \
+ __elf_preferred_address (loader, maplength, mapstartpref)
+
+/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
+ PLT entries should not be allowed to define the value.
+ ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
+ of the main executable's symbols, as for a COPY reloc. */
+/* We never want to use a PLT entry as the destination of a
+ reloc, when what is being relocated is a branch. This is
+ partly for efficiency, but mostly so we avoid loops. */
+#if defined USE_TLS && (!defined RTLD_BOOTSTRAP || USE___THREAD)
+#define elf_machine_type_class(type) \
+ ((((type) == R_PPC_JMP_SLOT \
+ || (type) == R_PPC_REL24 \
+ || ((type) >= R_PPC_DTPMOD32 /* contiguous TLS */ \
+ && (type) <= R_PPC_DTPREL32) \
+ || (type) == R_PPC_ADDR24) * ELF_RTYPE_CLASS_PLT) \
+ | (((type) == R_PPC_COPY) * ELF_RTYPE_CLASS_COPY))
+#else
+#define elf_machine_type_class(type) \
+ ((((type) == R_PPC_JMP_SLOT \
+ || (type) == R_PPC_REL24 \
+ || (type) == R_PPC_ADDR24) * ELF_RTYPE_CLASS_PLT) \
+ | (((type) == R_PPC_COPY) * ELF_RTYPE_CLASS_COPY))
+#endif
+
+/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries. */
+#define ELF_MACHINE_JMP_SLOT R_PPC_JMP_SLOT
+
+/* The PowerPC never uses REL relocations. */
+#define ELF_MACHINE_NO_REL 1
+
+/* Set up the loaded object described by MAP so its unrelocated PLT
+ entries will jump to the on-demand fixup code in dl-runtime.c.
+ Also install a small trampoline to be used by entries that have
+ been relocated to an address too far away for a single branch. */
+extern int __elf_machine_runtime_setup (struct link_map *map,
+ int lazy, int profile);
+
+static inline int
+elf_machine_runtime_setup (struct link_map *map,
+ int lazy, int profile)
+{
+ if (map->l_info[DT_JMPREL] == 0)
+ return lazy;
+
+ if (map->l_info[DT_PPC(GOT)] == 0)
+ /* Handle old style PLT. */
+ return __elf_machine_runtime_setup (map, lazy, profile);
+
+ /* New style non-exec PLT consisting of an array of addresses. */
+ map->l_info[DT_PPC(GOT)]->d_un.d_ptr += map->l_addr;
+ if (lazy)
+ {
+ Elf32_Addr *plt, *got, glink;
+ Elf32_Word num_plt_entries;
+ void (*dlrr) (void);
+ extern void _dl_runtime_resolve (void);
+ extern void _dl_prof_resolve (void);
+
+ if (__builtin_expect (!profile, 1))
+ dlrr = _dl_runtime_resolve;
+ else
+ {
+ if (GLRO(dl_profile) != NULL
+ && _dl_name_match_p (GLRO(dl_profile), map))
+ GL(dl_profile_map) = map;
+ dlrr = _dl_prof_resolve;
+ }
+ got = (Elf32_Addr *) map->l_info[DT_PPC(GOT)]->d_un.d_ptr;
+ glink = got[1];
+ got[1] = (Elf32_Addr) dlrr;
+ got[2] = (Elf32_Addr) map;
+
+ /* Relocate everything in .plt by the load address offset. */
+ plt = (Elf32_Addr *) D_PTR (map, l_info[DT_PLTGOT]);
+ num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
+ / sizeof (Elf32_Rela));
+
+ /* If a library is prelinked but we have to relocate anyway,
+ we have to be able to undo the prelinking of .plt section.
+ The prelinker saved us at got[1] address of .glink
+ section's start. */
+ if (glink)
+ {
+ glink += map->l_addr;
+ while (num_plt_entries-- != 0)
+ *plt++ = glink, glink += 4;
+ }
+ else
+ while (num_plt_entries-- != 0)
+ *plt++ += map->l_addr;
+ }
+ return lazy;
+}
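For lazy binding the code above only needs the first words of the area that
DT_PPC(GOT) points at: word 1 receives the resolver's entry point and word 2
the object's own struct link_map, which the PLT call sequence later hands to
the resolver. An illustrative (non-glibc) view of those words:

/* Sketch of the words written by elf_machine_runtime_setup above.  */
struct ppc32_got_words
{
  Elf32_Addr got0;      /* not touched by this code                       */
  Elf32_Addr resolver;  /* got[1]: _dl_runtime_resolve or _dl_prof_resolve;
                           a prelinker leaves .glink's start address here  */
  Elf32_Addr link_map;  /* got[2]: the struct link_map for this object     */
};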
+
+/* Change the PLT entry whose reloc is 'reloc' to call the actual routine. */
+extern Elf32_Addr __elf_machine_fixup_plt (struct link_map *map,
+ const Elf32_Rela *reloc,
+ Elf32_Addr *reloc_addr,
+ Elf32_Addr finaladdr);
+
+static inline Elf32_Addr
+elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+ const Elf32_Rela *reloc,
+ Elf32_Addr *reloc_addr, Elf32_Addr finaladdr)
+{
+ if (map->l_info[DT_PPC(GOT)] == 0)
+ /* Handle old style PLT. */
+ return __elf_machine_fixup_plt (map, reloc, reloc_addr, finaladdr);
+
+ *reloc_addr = finaladdr;
+ return finaladdr;
+}
+
+/* Return the final value of a plt relocation. */
+static inline Elf32_Addr
+elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+ Elf32_Addr value)
+{
+ return value + reloc->r_addend;
+}
+
+
+/* Names of the architecture-specific auditing callback functions. */
+#define ARCH_LA_PLTENTER ppc32_gnu_pltenter
+#define ARCH_LA_PLTEXIT ppc32_gnu_pltexit
+
+#endif /* dl_machine_h */
+
+#ifdef RESOLVE_MAP
+
+/* Do the actual processing of a reloc, once its target address
+ has been determined. */
+extern void __process_machine_rela (struct link_map *map,
+ const Elf32_Rela *reloc,
+ struct link_map *sym_map,
+ const Elf32_Sym *sym,
+ const Elf32_Sym *refsym,
+ Elf32_Addr *const reloc_addr,
+ Elf32_Addr finaladdr,
+ int rinfo) attribute_hidden;
+
+/* Call _dl_signal_error when a resolved value overflows a relocated area. */
+extern void _dl_reloc_overflow (struct link_map *map,
+ const char *name,
+ Elf32_Addr *const reloc_addr,
+ const Elf32_Sym *refsym) attribute_hidden;
+
+/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
+ LOADADDR is the load address of the object; INFO is an array indexed
+ by DT_* of the .dynamic section info. */
+
+auto inline void __attribute__ ((always_inline))
+elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
+ const Elf32_Sym *sym, const struct r_found_version *version,
+ void *const reloc_addr_arg)
+{
+ Elf32_Addr *const reloc_addr = reloc_addr_arg;
+ const Elf32_Sym *const refsym = sym;
+ Elf32_Addr value;
+ const int r_type = ELF32_R_TYPE (reloc->r_info);
+ struct link_map *sym_map = NULL;
+
+#ifndef RESOLVE_CONFLICT_FIND_MAP
+ if (r_type == R_PPC_RELATIVE)
+ {
+ *reloc_addr = map->l_addr + reloc->r_addend;
+ return;
+ }
+
+ if (__builtin_expect (r_type == R_PPC_NONE, 0))
+ return;
+
+ /* binutils on ppc32 includes st_value in r_addend for relocations
+ against local symbols. */
+ if (__builtin_expect (ELF32_ST_BIND (sym->st_info) == STB_LOCAL, 0)
+ && sym->st_shndx != SHN_UNDEF)
+ value = map->l_addr;
+ else
+ {
+ sym_map = RESOLVE_MAP (&sym, version, r_type);
+ value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;
+ }
+ value += reloc->r_addend;
+#else
+ value = reloc->r_addend;
+#endif
+
+ /* A small amount of code is duplicated here for speed. In libc,
+ more than 90% of the relocs are R_PPC_RELATIVE; in the X11 shared
+ libraries, 60% are R_PPC_RELATIVE, 24% are R_PPC_GLOB_DAT or
+ R_PPC_ADDR32, and 16% are R_PPC_JMP_SLOT (which this routine
+ wouldn't usually handle). As a bonus, doing this here allows
+ the switch statement in __process_machine_rela to work. */
+ switch (r_type)
+ {
+ case R_PPC_GLOB_DAT:
+ case R_PPC_ADDR32:
+ *reloc_addr = value;
+ break;
+
+#if defined USE_TLS && (!defined RTLD_BOOTSTRAP || USE___THREAD) \
+ && !defined RESOLVE_CONFLICT_FIND_MAP
+# ifdef RTLD_BOOTSTRAP
+# define NOT_BOOTSTRAP 0
+# else
+# define NOT_BOOTSTRAP 1
+# endif
+
+ case R_PPC_DTPMOD32:
+ if (!NOT_BOOTSTRAP)
+ /* During startup the dynamic linker is always index 1. */
+ *reloc_addr = 1;
+ else if (sym_map != NULL)
+ /* Get the information from the link map returned by the
+ RESOLVE_MAP function. */
+ *reloc_addr = sym_map->l_tls_modid;
+ break;
+ case R_PPC_DTPREL32:
+ /* During relocation all TLS symbols are defined and used.
+ Therefore the offset is already correct. */
+ if (NOT_BOOTSTRAP && sym_map != NULL)
+ *reloc_addr = TLS_DTPREL_VALUE (sym, reloc);
+ break;
+ case R_PPC_TPREL32:
+ if (!NOT_BOOTSTRAP || sym_map != NULL)
+ {
+ if (NOT_BOOTSTRAP)
+ CHECK_STATIC_TLS (map, sym_map);
+ *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
+ }
+ break;
+#endif /* USE_TLS etc. */
+
+ case R_PPC_JMP_SLOT:
+#ifdef RESOLVE_CONFLICT_FIND_MAP
+ RESOLVE_CONFLICT_FIND_MAP (map, reloc_addr);
+#endif
+ if (map->l_info[DT_PPC(GOT)] != 0)
+ {
+ *reloc_addr = value;
+ break;
+ }
+ /* FALLTHROUGH */
+
+ default:
+ __process_machine_rela (map, reloc, sym_map, sym, refsym,
+ reloc_addr, value, r_type);
+ }
+}
+
+auto inline void __attribute__ ((always_inline))
+elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
+ void *const reloc_addr_arg)
+{
+ Elf32_Addr *const reloc_addr = reloc_addr_arg;
+ *reloc_addr = l_addr + reloc->r_addend;
+}
+
+auto inline void __attribute__ ((always_inline))
+elf_machine_lazy_rel (struct link_map *map,
+ Elf32_Addr l_addr, const Elf32_Rela *reloc)
+{
+ /* elf_machine_runtime_setup handles this. */
+}
+
+/* The SVR4 ABI specifies that the JMPREL relocs must be inside the
+ DT_RELA table. */
+#define ELF_MACHINE_PLTREL_OVERLAP 1
+
+#endif /* RESOLVE_MAP */
diff --git a/libc/sysdeps/powerpc/powerpc32/dl-start.S b/libc/sysdeps/powerpc/powerpc32/dl-start.S
new file mode 100644
index 000000000..c77c4de19
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/dl-start.S
@@ -0,0 +1,110 @@
+/* Machine-dependent ELF startup code. PowerPC version.
+ Copyright (C) 1995-2000, 2002, 2004, 2005, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+
+/* Initial entry point code for the dynamic linker.
+ The C function `_dl_start' is the real entry point;
+ its return value is the user program's entry point. */
+ENTRY(_start)
+/* We start with the following on the stack, from top:
+ argc (4 bytes);
+ arguments for program (terminated by NULL);
+ environment variables (terminated by NULL);
+ arguments for the program loader. */
+
+/* Call _dl_start with one parameter pointing at argc */
+ mr r3,r1
+/* (we have to frob the stack pointer a bit to allow room for
+ _dl_start to save the link register). */
+ li r4,0
+ addi r1,r1,-16
+ stw r4,0(r1)
+ bl _dl_start@local
+
+ /* FALLTHRU */
+_dl_start_user:
+/* Now, we do our main work of calling initialisation procedures.
+ The ELF ABI doesn't say anything about parameters for these,
+ so we just pass argc, argv, and the environment.
+ Changing these is strongly discouraged (not least because argc is
+ passed by value!). */
+
+/* Put our GOT pointer in r31, */
+#ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r31
+ addis r31,r31,_GLOBAL_OFFSET_TABLE_-1b@ha
+ addi r31,r31,_GLOBAL_OFFSET_TABLE_-1b@l
+#else
+ bl _GLOBAL_OFFSET_TABLE_-4@local
+ mflr r31
+#endif
+/* the address of _start in r30, */
+ mr r30,r3
+/* &_dl_argc in 29, &_dl_argv in 27, and _dl_loaded in 28. */
+ lwz r28,_rtld_global@got(r31)
+ lwz r29,_dl_argc@got(r31)
+ lwz r27,_dl_argv@got(r31)
+
+/* Call _dl_init (_dl_loaded, _dl_argc, _dl_argv, _dl_argv+_dl_argc+1). */
+ lwz r3,0(r28)
+ lwz r4,0(r29)
+ lwz r5,0(r27)
+ slwi r6,r4,2
+ add r6,r5,r6
+ addi r6,r6,4
+ bl _dl_init_internal@local
+
+/* Now, to conform to the ELF ABI, we have to: */
+/* Pass argc (actually _dl_argc) in r3; */
+ lwz r3,0(r29)
+/* pass argv (actually _dl_argv) in r4; */
+ lwz r4,0(r27)
+/* pass envp (actually _dl_argv+_dl_argc+1) in r5; */
+ slwi r5,r3,2
+ add r6,r4,r5
+ addi r5,r6,4
+/* pass the auxiliary vector in r6. This is passed to us just after _envp. */
+2: lwzu r0,4(r6)
+ cmpwi r0,0
+ bne 2b
+ addi r6,r6,4
+/* Pass a termination function pointer (in this case _dl_fini) in r7. */
+ lwz r7,_dl_fini@got(r31)
+/* Now, call the start function in r30... */
+ mtctr r30
+/* Pass the stack pointer in r1 (so far so good), pointing to a NULL value.
+ (This lets our startup code distinguish between a program linked statically,
+ which linux will call with argc on top of the stack which will hopefully
+ never be zero, and a dynamically linked program which will always have
+ a NULL on the top of the stack).
+ Take the opportunity to clear LR, so anyone who accidentally returns
+ from _start gets SEGV. Also clear the next few words of the stack. */
+
+_dl_main_dispatch:
+ li r31,0
+ stw r31,0(r1)
+ mtlr r31
+ stw r31,4(r1)
+ stw r31,8(r1)
+ stw r31,12(r1)
+/* Go do it! */
+ bctr
+END(_start)
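The pointer arithmetic performed above in registers (argv follows argc, envp
starts one slot past argv's terminating NULL, and the auxiliary vector starts
one slot past envp's terminating NULL) corresponds roughly to the following C,
given only as an illustration with made-up names:

#include <elf.h>

/* Sketch of the start-up layout walked by _dl_start_user above.  */
static void
locate_initial_vectors (long *initial_sp, char ***argv_out,
                        char ***envp_out, Elf32_auxv_t **auxv_out)
{
  long argc = initial_sp[0];
  char **argv = (char **) (initial_sp + 1);
  char **envp = argv + argc + 1;        /* skip argv[] and its NULL   */
  char **p = envp;
  while (*p != NULL)                    /* skip environ and its NULL  */
    ++p;
  *argv_out = argv;
  *envp_out = envp;
  *auxv_out = (Elf32_auxv_t *) (p + 1);
}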
diff --git a/libc/sysdeps/powerpc/powerpc32/dl-trampoline.S b/libc/sysdeps/powerpc/powerpc32/dl-trampoline.S
new file mode 100644
index 000000000..6a158c3ff
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/dl-trampoline.S
@@ -0,0 +1,186 @@
+/* PLT trampolines. PPC32 version.
+ Copyright (C) 2005, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+
+ .section ".text"
+ .align 2
+ .globl _dl_runtime_resolve
+ .type _dl_runtime_resolve,@function
+_dl_runtime_resolve:
+ cfi_startproc
+ # We need to save the registers used to pass parameters, and register 0,
+ # which is used by _mcount; the registers are saved in a stack frame.
+ stwu r1,-64(r1)
+ cfi_adjust_cfa_offset (64)
+ stw r0,12(r1)
+ stw r3,16(r1)
+ stw r4,20(r1)
+ # The code that calls this has put parameters for `fixup' in r12 and r11.
+ mr r3,r12
+ stw r5,24(r1)
+ mr r4,r11
+ stw r6,28(r1)
+ mflr r0
+ # We also need to save some of the condition register fields
+ stw r7,32(r1)
+ # Don't clobber the caller's LRSAVE, it is needed by _mcount.
+ stw r0,48(r1)
+ cfi_offset (lr, -16)
+ stw r8,36(r1)
+ mfcr r0
+ stw r9,40(r1)
+ stw r10,44(r1)
+ stw r0,8(r1)
+ bl _dl_fixup@local
+ # 'fixup' returns the address we want to branch to.
+ mtctr r3
+ # Put the registers back...
+ lwz r0,48(r1)
+ lwz r10,44(r1)
+ lwz r9,40(r1)
+ mtlr r0
+ lwz r8,36(r1)
+ lwz r0,8(r1)
+ lwz r7,32(r1)
+ lwz r6,28(r1)
+ mtcrf 0xFF,r0
+ lwz r5,24(r1)
+ lwz r4,20(r1)
+ lwz r3,16(r1)
+ lwz r0,12(r1)
+ # ...unwind the stack frame, and jump to the PLT entry we updated.
+ addi r1,r1,64
+ bctr
+ cfi_endproc
+ .size _dl_runtime_resolve,.-_dl_runtime_resolve
+
+#ifndef PROF
+ .align 2
+ .globl _dl_prof_resolve
+ .type _dl_prof_resolve,@function
+_dl_prof_resolve:
+ cfi_startproc
+ # We need to save the registers used to pass parameters, and register 0,
+ # which is used by _mcount; the registers are saved in a stack frame.
+ stwu r1,-320(r1)
+ cfi_adjust_cfa_offset (320)
+ /* Stack layout:
+
+ +312 stackframe
+ +308 lr
+ +304 r1
+ +288 v12
+ +272 v11
+ +256 v10
+ +240 v9
+ +224 v8
+ +208 v7
+ +192 v6
+ +176 v5
+ +160 v4
+ +144 v3
+ +128 v2
+ +112 v1
+ +104 fp8
+ +96 fp7
+ +88 fp6
+ +80 fp5
+ +72 fp4
+ +64 fp3
+ +56 fp2
+ +48 fp1
+ +44 r10
+ +40 r9
+ +36 r8
+ +32 r7
+ +28 r6
+ +24 r5
+ +20 r4
+ +16 r3
+ +12 r0
+ +8 cr
+ r1 link
+ */
+ stw r0,12(r1)
+ stw r3,16(r1)
+ stw r4,20(r1)
+ # The code that calls this has put parameters for `fixup' in r12 and r11.
+ mr r3,r12
+ stw r5,24(r1)
+ mr r4,r11
+ stw r6,28(r1)
+ mflr r5
+ # We also need to save some of the condition register fields.
+ stw r7,32(r1)
+ # Don't clobber the caller's LRSAVE, it is needed by _mcount.
+ stw r5,308(r1)
+ cfi_offset (lr, -12)
+ stw r8,36(r1)
+ mfcr r0
+ stw r9,40(r1)
+ stw r10,44(r1)
+ stw r0,8(r1)
+ # Save the floating point registers
+ stfd fp1,48(r1)
+ stfd fp2,56(r1)
+ stfd fp3,64(r1)
+ stfd fp4,72(r1)
+ stfd fp5,80(r1)
+ stfd fp6,88(r1)
+ stfd fp7,96(r1)
+ stfd fp8,104(r1)
+ # XXX TODO: store vmx registers
+ # Load the extra parameters.
+ addi r6,r1,16
+ addi r7,r1,312
+ li r0,-1
+ stw r0,0(r7)
+ bl _dl_profile_fixup@local
+ # 'fixup' returns the address we want to branch to.
+ mtctr r3
+ # Put the registers back...
+ lwz r0,308(r1)
+ lwz r10,44(r1)
+ lwz r9,40(r1)
+ mtlr r0
+ lwz r8,36(r1)
+ lwz r0,8(r1)
+ lwz r7,32(r1)
+ lwz r6,28(r1)
+ mtcrf 0xFF,r0
+ lwz r5,24(r1)
+ lwz r4,20(r1)
+ lwz r3,16(r1)
+ lwz r0,12(r1)
+ # Load the floating point registers.
+ lfd fp1,48(r1)
+ lfd fp2,56(r1)
+ lfd fp3,64(r1)
+ lfd fp4,72(r1)
+ lfd fp5,80(r1)
+ lfd fp6,88(r1)
+ lfd fp7,96(r1)
+ lfd fp8,104(r1)
+ # ...unwind the stack frame, and jump to the PLT entry we updated.
+ addi r1,r1,320
+ bctr
+ cfi_endproc
+ .size _dl_prof_resolve,.-_dl_prof_resolve
+#endif
diff --git a/libc/sysdeps/powerpc/powerpc32/elf/bzero.S b/libc/sysdeps/powerpc/powerpc32/elf/bzero.S
new file mode 100644
index 000000000..17c6f5611
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/elf/bzero.S
@@ -0,0 +1,37 @@
+/* Optimized bzero `implementation' for PowerPC.
+ Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+
+ENTRY (BP_SYM (__bzero))
+
+#if __BOUNDED_POINTERS__
+ mr r6,r4
+ li r5,0
+ mr r4,r3
+ /* Tell memset that we don't want a return value. */
+ li r3,0
+#else
+ mr r5,r4
+ li r4,0
+#endif
+ b BP_SYM (memset)@local
+END (BP_SYM (__bzero))
+weak_alias (BP_SYM (__bzero), BP_SYM (bzero))
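In C terms the stub above merely re-routes its arguments into memset's calling
convention; this equivalent is illustrative, not the actual glibc definition,
and ignores the bounded-pointer variant:

#include <string.h>

void
__bzero (void *s, size_t n)
{
  memset (s, 0, n);
}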
diff --git a/libc/sysdeps/powerpc/powerpc32/elf/configure b/libc/sysdeps/powerpc/powerpc32/elf/configure
new file mode 100755
index 000000000..536052e0e
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/elf/configure
@@ -0,0 +1,52 @@
+# This file is generated from configure.in by Autoconf. DO NOT EDIT!
+ # Local configure fragment for sysdeps/powerpc32/elf.
+
+if test "$usetls" != no; then
+# Check for support of thread-local storage handling in assembler and
+# linker.
+echo "$as_me:$LINENO: checking for powerpc32 TLS support" >&5
+echo $ECHO_N "checking for powerpc32 TLS support... $ECHO_C" >&6
+if test "${libc_cv_powerpc32_tls+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat > conftest.s <<\EOF
+ .section ".tdata","awT",@progbits
+x: .long 1
+x1: .long 1
+x2: .long 1
+ .text
+ addi 3,31,x@got@tlsgd
+ addi 3,31,x1@got@tlsld
+ addi 9,3,x1@dtprel
+ addis 9,3,x2@dtprel@ha
+ addi 9,9,x2@dtprel@l
+ lwz 0,x1@dtprel(3)
+ addis 9,3,x2@dtprel@ha
+ lwz 0,x2@dtprel@l(9)
+ lwz 9,x3@got@tprel(31)
+ add 9,9,x@tls
+ addi 9,2,x1@tprel
+ addis 9,2,x2@tprel@ha
+ addi 9,9,x2@tprel@l
+EOF
+if { ac_try='${CC-cc} -c $CFLAGS conftest.s 1>&5'
+ { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+ (eval $ac_try) 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ libc_cv_powerpc32_tls=yes
+else
+ libc_cv_powerpc32_tls=no
+fi
+rm -f conftest*
+fi
+echo "$as_me:$LINENO: result: $libc_cv_powerpc32_tls" >&5
+echo "${ECHO_T}$libc_cv_powerpc32_tls" >&6
+if test $libc_cv_powerpc32_tls = yes; then
+ cat >>confdefs.h <<\_ACEOF
+#define HAVE_TLS_SUPPORT 1
+_ACEOF
+
+fi
+fi
diff --git a/libc/sysdeps/powerpc/powerpc32/elf/configure.in b/libc/sysdeps/powerpc/powerpc32/elf/configure.in
new file mode 100644
index 000000000..fbf41a498
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/elf/configure.in
@@ -0,0 +1,38 @@
+GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory.
+# Local configure fragment for sysdeps/powerpc32/elf.
+
+if test "$usetls" != no; then
+# Check for support of thread-local storage handling in assembler and
+# linker.
+AC_CACHE_CHECK(for powerpc32 TLS support, libc_cv_powerpc32_tls, [dnl
+cat > conftest.s <<\EOF
+ .section ".tdata","awT",@progbits
+x: .long 1
+x1: .long 1
+x2: .long 1
+ .text
+ addi 3,31,x@got@tlsgd
+ addi 3,31,x1@got@tlsld
+ addi 9,3,x1@dtprel
+ addis 9,3,x2@dtprel@ha
+ addi 9,9,x2@dtprel@l
+ lwz 0,x1@dtprel(3)
+ addis 9,3,x2@dtprel@ha
+ lwz 0,x2@dtprel@l(9)
+ lwz 9,x3@got@tprel(31)
+ add 9,9,x@tls
+ addi 9,2,x1@tprel
+ addis 9,2,x2@tprel@ha
+ addi 9,9,x2@tprel@l
+EOF
+dnl
+if AC_TRY_COMMAND(${CC-cc} -c $CFLAGS conftest.s 1>&AS_MESSAGE_LOG_FD); then
+ libc_cv_powerpc32_tls=yes
+else
+ libc_cv_powerpc32_tls=no
+fi
+rm -f conftest*])
+if test $libc_cv_powerpc32_tls = yes; then
+ AC_DEFINE(HAVE_TLS_SUPPORT)
+fi
+fi
diff --git a/libc/sysdeps/powerpc/powerpc32/elf/start.S b/libc/sysdeps/powerpc/powerpc32/elf/start.S
new file mode 100644
index 000000000..bafd2ae00
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/elf/start.S
@@ -0,0 +1,101 @@
+/* Startup code for programs linked with GNU libc.
+ Copyright (C) 1998,1999,2000,2001,2002,2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file with other
+ programs, and to distribute those programs without any restriction
+ coming from the use of this file. (The GNU Lesser General Public
+ License restrictions do apply in other respects; for example, they
+ cover modification of the file, and distribution when not linked
+ into another program.)
+
+ Note that people who make modified versions of this file are not
+ obligated to grant this special exception for their modified
+ versions; it is their choice whether to do so. The GNU Lesser
+ General Public License gives permission to release a modified
+ version without this exception; this exception also makes it
+ possible to release a modified version which carries forward this
+ exception.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include "bp-sym.h"
+
+ /* These are the various addresses we require. */
+#ifdef PIC
+ .section ".data"
+#else
+ .section ".rodata"
+#endif
+ .align 2
+L(start_addresses):
+ .long _SDA_BASE_
+ .long BP_SYM (main)
+ .long __libc_csu_init
+ .long __libc_csu_fini
+ ASM_SIZE_DIRECTIVE(L(start_addresses))
+
+ .section ".text"
+#if defined PIC && !defined HAVE_ASM_PPC_REL16
+L(start_addressesp):
+ .long L(start_addresses)-L(branch)
+#endif
+ENTRY(_start)
+ /* Save the stack pointer, in case we're statically linked under Linux. */
+ mr r9,r1
+ /* Set up an initial stack frame, and clear the LR. */
+ clrrwi r1,r1,4
+#ifdef PIC
+ bcl 20,31,L(branch)
+L(branch):
+ li r0,0
+ mflr r13
+#else
+ li r0,0
+#endif
+ stwu r1,-16(r1)
+ mtlr r0
+ stw r0,0(r1)
+ /* Set r13 to point at the 'small data area', and put the address of
+ start_addresses in r8. Also load the GOT pointer so that new PLT
+ calls work, like the one to __libc_start_main. */
+#ifdef PIC
+# ifdef HAVE_ASM_PPC_REL16
+ addis r30,r13,_GLOBAL_OFFSET_TABLE_-L(branch)@ha
+ addis r8,r13,L(start_addresses)-L(branch)@ha
+ addi r30,r30,_GLOBAL_OFFSET_TABLE_-L(branch)@l
+ lwzu r13,L(start_addresses)-L(branch)@l(r8)
+# else
+ lwz r8,L(start_addressesp)-L(branch)(r13)
+ add r8,r13,r8
+ lwz r13,0(r8)
+# endif
+#else
+ lis r8,L(start_addresses)@ha
+ lwzu r13,L(start_addresses)@l(r8)
+#endif
+ /* and continue in libc-start, in glibc. */
+ b JUMPTARGET(BP_SYM (__libc_start_main))
+END(_start)
+
+/* Define a symbol for the first piece of initialized data. */
+ .section ".data"
+ .globl __data_start
+__data_start:
+weak_alias (__data_start, data_start)
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/Makefile b/libc/sysdeps/powerpc/powerpc32/fpu/Makefile
new file mode 100644
index 000000000..e05073970
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(subdir),misc)
+sysdep_routines += fprsave fprrest
+endif
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S b/libc/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
new file mode 100644
index 000000000..aa24b059d
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
@@ -0,0 +1,164 @@
+/* longjmp for PowerPC.
+ Copyright (C) 1995-99, 2000, 2003-2005, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#define _ASM
+#ifdef __NO_VMX__
+# include <novmxsetjmp.h>
+#else
+# include <jmpbuf-offsets.h>
+#endif
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+ .machine "altivec"
+ENTRY (BP_SYM (__longjmp))
+ CHECK_BOUNDS_BOTH_WIDE_LIT (r3, r8, r9, JB_SIZE)
+#ifndef __NO_VMX__
+# ifdef PIC
+ mflr r6
+ cfi_register (lr,r6)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r5
+ addis r5,r5,_GLOBAL_OFFSET_TABLE_-1b@ha
+ addi r5,r5,_GLOBAL_OFFSET_TABLE_-1b@l
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r5
+# endif
+# ifdef SHARED
+ lwz r5,_rtld_global_ro@got(r5)
+ mtlr r6
+ cfi_same_value (lr)
+ lwz r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
+# else
+ lwz r5,_dl_hwcap@got(r5)
+ mtlr r6
+ cfi_same_value (lr)
+ lwz r5,0(r5)
+# endif
+# else
+ lis r5,_dl_hwcap@ha
+ lwz r5,_dl_hwcap@l(r5)
+# endif
+ andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
+ beq L(no_vmx)
+ la r5,((JB_VRS)*4)(3)
+ andi. r6,r5,0xf
+ lwz r0,((JB_VRSAVE)*4)(3)
+ mtspr VRSAVE,r0
+ beq+ aligned_restore_vmx
+ addi r6,r5,16
+ lvsl v0,0,r5
+ lvx v1,0,r5
+ addi r5,r5,32
+ lvx v21,0,r6
+ vperm v20,v1,v21,v0
+# define load_misaligned_vmx_lo_loaded(loadvr,lovr,shiftvr,loadgpr,addgpr) \
+ addi addgpr,addgpr,32; \
+ lvx lovr,0,loadgpr; \
+ vperm loadvr,loadvr,lovr,shiftvr;
+ load_misaligned_vmx_lo_loaded(v21,v22,v0,r5,r6)
+ load_misaligned_vmx_lo_loaded(v22,v23,v0,r6,r5)
+ load_misaligned_vmx_lo_loaded(v23,v24,v0,r5,r6)
+ load_misaligned_vmx_lo_loaded(v24,v25,v0,r6,r5)
+ load_misaligned_vmx_lo_loaded(v25,v26,v0,r5,r6)
+ load_misaligned_vmx_lo_loaded(v26,v27,v0,r6,r5)
+ load_misaligned_vmx_lo_loaded(v27,v28,v0,r5,r6)
+ load_misaligned_vmx_lo_loaded(v28,v29,v0,r6,r5)
+ load_misaligned_vmx_lo_loaded(v29,v30,v0,r5,r6)
+ load_misaligned_vmx_lo_loaded(v30,v31,v0,r6,r5)
+ lvx v1,0,r5
+ vperm v31,v31,v1,v0
+ b L(no_vmx)
+aligned_restore_vmx:
+ addi r6,r5,16
+ lvx v20,0,r5
+ addi r5,r5,32
+ lvx v21,0,r6
+ addi r6,r6,32
+ lvx v22,0,r5
+ addi r5,r5,32
+ lvx v23,0,r6
+ addi r6,r6,32
+ lvx v24,0,r5
+ addi r5,r5,32
+ lvx v25,0,r6
+ addi r6,r6,32
+ lvx v26,0,r5
+ addi r5,r5,32
+ lvx v27,0,r6
+ addi r6,r6,32
+ lvx v28,0,r5
+ addi r5,r5,32
+ lvx v29,0,r6
+ addi r6,r6,32
+ lvx v30,0,r5
+ lvx v31,0,r6
+L(no_vmx):
+#endif
+ lwz r1,(JB_GPR1*4)(r3)
+ lwz r0,(JB_LR*4)(r3)
+ lwz r14,((JB_GPRS+0)*4)(r3)
+ lfd fp14,((JB_FPRS+0*2)*4)(r3)
+ lwz r15,((JB_GPRS+1)*4)(r3)
+ lfd fp15,((JB_FPRS+1*2)*4)(r3)
+ lwz r16,((JB_GPRS+2)*4)(r3)
+ lfd fp16,((JB_FPRS+2*2)*4)(r3)
+ lwz r17,((JB_GPRS+3)*4)(r3)
+ lfd fp17,((JB_FPRS+3*2)*4)(r3)
+ lwz r18,((JB_GPRS+4)*4)(r3)
+ lfd fp18,((JB_FPRS+4*2)*4)(r3)
+ lwz r19,((JB_GPRS+5)*4)(r3)
+ lfd fp19,((JB_FPRS+5*2)*4)(r3)
+ lwz r20,((JB_GPRS+6)*4)(r3)
+ lfd fp20,((JB_FPRS+6*2)*4)(r3)
+#ifdef PTR_DEMANGLE
+ PTR_DEMANGLE (r0, r25)
+ PTR_DEMANGLE2 (r1, r25)
+#endif
+ mtlr r0
+ lwz r21,((JB_GPRS+7)*4)(r3)
+ lfd fp21,((JB_FPRS+7*2)*4)(r3)
+ lwz r22,((JB_GPRS+8)*4)(r3)
+ lfd fp22,((JB_FPRS+8*2)*4)(r3)
+ lwz r0,(JB_CR*4)(r3)
+ lwz r23,((JB_GPRS+9)*4)(r3)
+ lfd fp23,((JB_FPRS+9*2)*4)(r3)
+ lwz r24,((JB_GPRS+10)*4)(r3)
+ lfd fp24,((JB_FPRS+10*2)*4)(r3)
+ lwz r25,((JB_GPRS+11)*4)(r3)
+ lfd fp25,((JB_FPRS+11*2)*4)(r3)
+ mtcrf 0xFF,r0
+ lwz r26,((JB_GPRS+12)*4)(r3)
+ lfd fp26,((JB_FPRS+12*2)*4)(r3)
+ lwz r27,((JB_GPRS+13)*4)(r3)
+ lfd fp27,((JB_FPRS+13*2)*4)(r3)
+ lwz r28,((JB_GPRS+14)*4)(r3)
+ lfd fp28,((JB_FPRS+14*2)*4)(r3)
+ lwz r29,((JB_GPRS+15)*4)(r3)
+ lfd fp29,((JB_FPRS+15*2)*4)(r3)
+ lwz r30,((JB_GPRS+16)*4)(r3)
+ lfd fp30,((JB_FPRS+16*2)*4)(r3)
+ lwz r31,((JB_GPRS+17)*4)(r3)
+ lfd fp31,((JB_FPRS+17*2)*4)(r3)
+ mr r3,r4
+ blr
+END (BP_SYM (__longjmp))
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/__longjmp.S b/libc/sysdeps/powerpc/powerpc32/fpu/__longjmp.S
new file mode 100644
index 000000000..161bf213c
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/__longjmp.S
@@ -0,0 +1,42 @@
+/* AltiVec/VMX (new) version of __longjmp for PowerPC.
+ Copyright (C) 1995,1996,1997,1999,2000,2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <libc-symbols.h>
+#include <rtld-global-offsets.h>
+#include <shlib-compat.h>
+
+#if defined NOT_IN_libc
+/* Build a non-versioned object for rtld-*. */
+# include "__longjmp-common.S"
+
+#else /* !NOT_IN_libc */
+/* Build a versioned object for libc. */
+default_symbol_version (__vmx__longjmp,__longjmp,GLIBC_2.3.4);
+# define __longjmp __vmx__longjmp
+# include "__longjmp-common.S"
+
+# if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+# define __NO_VMX__
+# undef JB_SIZE
+symbol_version (__novmx__longjmp,__longjmp,GLIBC_2.0);
+# undef __longjmp
+# define __longjmp __novmx__longjmp
+# include "__longjmp-common.S"
+# endif
+#endif /* !NOT_IN_libc */
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/fprrest.S b/libc/sysdeps/powerpc/powerpc32/fpu/fprrest.S
new file mode 100644
index 000000000..2f6c6deb2
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/fprrest.S
@@ -0,0 +1,95 @@
+/* Copyright (C) 2000, 2001, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+/*
+ Floating Point Registers (FPRs) restore routine
+*/
+
+#include <sysdep.h>
+
+ENTRY(_restfpr_all)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf14)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_14)
+C_TEXT(_restf14):
+C_TEXT(_restfpr_14): lfd fp14,-144(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf15)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_15)
+C_TEXT(_restf15):
+C_TEXT(_restfpr_15): lfd fp15,-136(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf16)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_16)
+C_TEXT(_restf16):
+C_TEXT(_restfpr_16): lfd fp16,-128(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf17)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_17)
+C_TEXT(_restf17):
+C_TEXT(_restfpr_17): lfd fp17,-120(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf18)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_18)
+C_TEXT(_restf18):
+C_TEXT(_restfpr_18): lfd fp18,-112(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf19)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_19)
+C_TEXT(_restf19):
+C_TEXT(_restfpr_19): lfd fp19,-104(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf20)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_20)
+C_TEXT(_restf20):
+C_TEXT(_restfpr_20): lfd fp20,-96(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf21)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_21)
+C_TEXT(_restf21):
+C_TEXT(_restfpr_21): lfd fp21,-88(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf22)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_22)
+C_TEXT(_restf22):
+C_TEXT(_restfpr_22): lfd fp22,-80(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf23)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_23)
+C_TEXT(_restf23):
+C_TEXT(_restfpr_23): lfd fp23,-72(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf24)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_24)
+C_TEXT(_restf24):
+C_TEXT(_restfpr_24): lfd fp24,-64(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf25)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_25)
+C_TEXT(_restf25):
+C_TEXT(_restfpr_25): lfd fp25,-56(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf26)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_26)
+C_TEXT(_restf26):
+C_TEXT(_restfpr_26): lfd fp26,-48(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf27)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_27)
+C_TEXT(_restf27):
+C_TEXT(_restfpr_27): lfd fp27,-40(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf28)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_28)
+C_TEXT(_restf28):
+C_TEXT(_restfpr_28): lfd fp28,-32(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restf29)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restfpr_29)
+C_TEXT(_restf29):
+C_TEXT(_restfpr_29): lwz r0,8(r1) #get return address from frame
+ lfd fp29,-24(r1) #restore f29
+ mtlr r0 #move return address to LR
+ lfd fp30,-16(r1) #restore f30
+ lfd fp31,-8(r1) #restore f31
+ blr #return
+END (_restfpr_all)
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/fprsave.S b/libc/sysdeps/powerpc/powerpc32/fpu/fprsave.S
new file mode 100644
index 000000000..c05178775
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/fprsave.S
@@ -0,0 +1,112 @@
+/* Copyright (C) 2000, 2001, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+/*
+ Floating Point Registers (FPRs) save routine
+*/
+
+#include <sysdep.h>
+
+ENTRY(_savefpr_all)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef14)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_14)
+C_TEXT(_savef14):
+C_TEXT(_savefpr_14): stfd fp14,-144(r1)
+ cfi_offset(fp14,-144)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef15)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_15)
+C_TEXT(_savef15):
+C_TEXT(_savefpr_15): stfd fp15,-136(r1)
+ cfi_offset(fp15,-136)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef16)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_16)
+C_TEXT(_savef16):
+C_TEXT(_savefpr_16): stfd fp16,-128(r1)
+ cfi_offset(fp16,-128)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef17)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_17)
+C_TEXT(_savef17):
+C_TEXT(_savefpr_17): stfd fp17,-120(r1)
+ cfi_offset(fp17,-120)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef18)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_18)
+C_TEXT(_savef18):
+C_TEXT(_savefpr_18): stfd fp18,-112(r1)
+ cfi_offset(fp18,-112)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef19)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_19)
+C_TEXT(_savef19):
+C_TEXT(_savefpr_19): stfd fp19,-104(r1)
+ cfi_offset(fp19,-104)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef20)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_20)
+C_TEXT(_savef20):
+C_TEXT(_savefpr_20): stfd fp20,-96(r1)
+ cfi_offset(fp20,-96)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef21)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_21)
+C_TEXT(_savef21):
+C_TEXT(_savefpr_21): stfd fp21,-88(r1)
+ cfi_offset(fp21,-88)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef22)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_22)
+C_TEXT(_savef22):
+C_TEXT(_savefpr_22): stfd fp22,-80(r1)
+ cfi_offset(fp22,-80)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef23)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_23)
+C_TEXT(_savef23):
+C_TEXT(_savefpr_23): stfd fp23,-72(r1)
+ cfi_offset(fp23,-72)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef24)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_24)
+C_TEXT(_savef24):
+C_TEXT(_savefpr_24): stfd fp24,-64(r1)
+ cfi_offset(fp24,-64)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef25)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_25)
+C_TEXT(_savef25):
+C_TEXT(_savefpr_25): stfd fp25,-56(r1)
+ cfi_offset(fp25,-56)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef26)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_26)
+C_TEXT(_savef26):
+C_TEXT(_savefpr_26): stfd fp26,-48(r1)
+ cfi_offset(fp26,-48)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef27)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_27)
+C_TEXT(_savef27):
+C_TEXT(_savefpr_27): stfd fp27,-40(r1)
+ cfi_offset(fp27,-40)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef28)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_28)
+C_TEXT(_savef28):
+C_TEXT(_savefpr_28): stfd fp28,-32(r1)
+ cfi_offset(fp28,-32)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savef29)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savefpr_29)
+C_TEXT(_savef29):
+C_TEXT(_savefpr_29): stfd fp29,-24(r1) #save f29
+ stfd fp30,-16(r1) #save f30
+ stfd fp31,-8(r1) #save f31
+ cfi_offset(fp29,-24)
+ cfi_offset(fp30,-16)
+ cfi_offset(fp31,-8)
+ stw r0,8(r1) #save LR in caller's frame
+ blr #return
+END (_savefpr_all)
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_ceil.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
new file mode 100644
index 000000000..bc74d302f
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
@@ -0,0 +1,83 @@
+/* ceil function. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
+.LC0: /* 2**52 */
+ .long 0x59800000
+
+ .section ".text"
+ENTRY (__ceil)
+ mffs fp11 /* Save current FPU rounding mode. */
+#ifdef SHARED
+ mflr r11
+ cfi_register(lr,r11)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ lfs fp13,.LC0-1b@l(r9)
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r10
+ lwz r9,.LC0@got(10)
+ lfs fp13,0(r9)
+# endif
+ mtlr r11
+ cfi_same_value (lr)
+#else
+ lis r9,.LC0@ha
+ lfs fp13,.LC0@l(r9)
+#endif
+ fabs fp0,fp1
+ fsub fp12,fp13,fp13 /* generate 0.0 */
+ fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
+ bnllr- cr7
+ mtfsfi 7,2 /* Set rounding mode toward +inf. */
+ ble- cr6,.L4
+ fadd fp1,fp1,fp13 /* x+= TWO52; */
+ fsub fp1,fp1,fp13 /* x-= TWO52; */
+ fabs fp1,fp1 /* if (x == 0.0) */
+ /* x = 0.0; */
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+.L4:
+ bge- cr6,.L9 /* if (x < 0.0) */
+ fsub fp1,fp1,fp13 /* x-= TWO52; */
+ fadd fp1,fp1,fp13 /* x+= TWO52; */
+ fnabs fp1,fp1 /* if (x == 0.0) */
+ /* x = -0.0; */
+.L9:
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+ END (__ceil)
+
+weak_alias (__ceil, ceil)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__ceil, ceill)
+strong_alias (__ceil, __ceill)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __ceil, ceill, GLIBC_2_0)
+#endif
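The assembly above relies on a classic trick: once the rounding mode is set
toward +infinity, adding and then subtracting 2^52 discards the fractional
bits of any double whose magnitude is below 2^52, and fabs/fnabs fix the sign
of a zero result. A hedged C sketch of the same idea (assuming IEEE binary64
doubles; this is not the code glibc compiles):

#include <fenv.h>
#include <math.h>

static double
ceil_sketch (double x)
{
  static const double TWO52 = 0x1p52;
  int saved = fegetround ();

  if (fabs (x) < TWO52)                 /* larger magnitudes (and NaNs)
                                           are returned unchanged      */
    {
      fesetround (FE_UPWARD);                /* mtfsfi 7,2 above       */
      if (x > 0.0)
        x = fabs ((x + TWO52) - TWO52);      /* fabs forces +0.0       */
      else if (x < 0.0)
        x = -fabs ((x - TWO52) + TWO52);     /* fnabs forces -0.0      */
      fesetround (saved);                    /* mtfsf 0x01,fp11 above  */
    }
  return x;
}

s_floor.S and the float variants below use the same pattern with the rounding
mode set toward -infinity and, for single precision, the constant 2^23.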
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
new file mode 100644
index 000000000..47a75ec0c
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
@@ -0,0 +1,75 @@
+/* float ceil function. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
+.LC0: /* 2**23 */
+ .long 0x4b000000
+
+ .section ".text"
+ENTRY (__ceilf)
+ mffs fp11 /* Save current FPU rounding mode. */
+#ifdef SHARED
+ mflr r11
+ cfi_register(lr,r11)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ lfs fp13,.LC0-1b@l(r9)
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r10
+ lwz r9,.LC0@got(10)
+ lfs fp13,0(r9)
+# endif
+ mtlr r11
+ cfi_same_value (lr)
+#else
+ lis r9,.LC0@ha
+ lfs fp13,.LC0@l(r9)
+#endif
+ fabs fp0,fp1
+ fsubs fp12,fp13,fp13 /* generate 0.0 */
+ fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO23) */
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
+ bnllr- cr7
+ mtfsfi 7,2 /* Set rounding mode toward +inf. */
+ ble- cr6,.L4
+ fadds fp1,fp1,fp13 /* x+= TWO23; */
+ fsubs fp1,fp1,fp13 /* x-= TWO23; */
+ fabs fp1,fp1 /* if (x == 0.0) */
+ /* x = 0.0; */
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+.L4:
+ bge- cr6,.L9 /* if (x < 0.0) */
+ fsubs fp1,fp1,fp13 /* x-= TWO23; */
+ fadds fp1,fp1,fp13 /* x+= TWO23; */
+ fnabs fp1,fp1 /* if (x == 0.0) */
+ /* x = -0.0; */
+.L9:
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+ END (__ceilf)
+
+weak_alias (__ceilf, ceilf)
+
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_copysign.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_copysign.S
new file mode 100644
index 000000000..dd68b0869
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_copysign.S
@@ -0,0 +1,60 @@
+/* Copy a sign bit between floating-point values.
+ Copyright (C) 1997, 1999, 2000, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+/* This has been coded in assembler because GCC makes such a mess of it
+ when it's coded in C. */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ENTRY(__copysign)
+/* double [f1] copysign (double [f1] x, double [f2] y);
+ copysign(x,y) returns a value with the magnitude of x and
+ with the sign bit of y. */
+ stwu r1,-16(r1)
+ cfi_adjust_cfa_offset (16)
+ stfd fp2,8(r1)
+ lwz r3,8(r1)
+ cmpwi r3,0
+ addi r1,r1,16
+ cfi_adjust_cfa_offset (-16)
+ blt L(0)
+ fabs fp1,fp1
+ blr
+L(0): fnabs fp1,fp1
+ blr
+ END (__copysign)
+
+weak_alias (__copysign,copysign)
+
+/* It turns out that it's safe to use this code even for single-precision. */
+weak_alias (__copysign,copysignf)
+strong_alias(__copysign,__copysignf)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__copysign,copysignl)
+strong_alias(__copysign,__copysignl)
+#endif
+#ifdef IS_IN_libm
+# if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __copysign, copysignl, GLIBC_2_0)
+# endif
+#elif LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+compat_symbol (libc, __copysign, copysignl, GLIBC_2_0)
+#endif
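The sequence above spills y to the stack purely so an integer compare can test
the sign bit in the high word of its binary64 representation. Roughly the same
logic in C (an illustration, not the glibc implementation):

#include <math.h>
#include <stdint.h>
#include <string.h>

static double
copysign_sketch (double x, double y)
{
  uint64_t bits;
  memcpy (&bits, &y, sizeof bits);          /* like stfd fp2 / lwz r3 */
  if ((int32_t) (bits >> 32) < 0)           /* sign bit of y set?     */
    return -fabs (x);                       /* fnabs fp1,fp1          */
  return fabs (x);                          /* fabs fp1,fp1           */
}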
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_copysignf.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_copysignf.S
new file mode 100644
index 000000000..e05438ae7
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_copysignf.S
@@ -0,0 +1 @@
+/* __copysignf is in s_copysign.S */
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_copysignl.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_copysignl.S
new file mode 100644
index 000000000..64b6a4543
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_copysignl.S
@@ -0,0 +1,50 @@
+/* Copy a sign bit between floating-point values.
+ IBM extended format long double version.
+ Copyright (C) 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ENTRY(__copysignl)
+/* long double [f1,f2] copysign (long double [f1,f2] x, long double [f3,f4] y);
+ copysign(x,y) returns a value with the magnitude of x and
+ with the sign bit of y. */
+ stwu r1,-16(r1)
+ cfi_adjust_cfa_offset (16)
+ stfd fp3,8(r1)
+ fmr fp0,fp1
+ fabs fp1,fp1
+ fcmpu cr7,fp0,fp1
+ lwz r3,8(r1)
+ cmpwi cr6,r3,0
+ addi r1,r1,16
+ cfi_adjust_cfa_offset (-16)
+ beq cr7,L(0)
+ fneg fp2,fp2
+L(0): bgelr cr6
+ fneg fp1,fp1
+ fneg fp2,fp2
+ blr
+END (__copysignl)
+
+#ifdef IS_IN_libm
+long_double_symbol (libm, __copysignl, copysignl)
+#else
+long_double_symbol (libc, __copysignl, copysignl)
+#endif
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_fabs.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_fabs.S
new file mode 100644
index 000000000..53d21301e
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_fabs.S
@@ -0,0 +1,5 @@
+#include <math_ldbl_opt.h>
+#include <sysdeps/powerpc/fpu/s_fabs.S>
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __fabs, fabsl, GLIBC_2_0)
+#endif
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_fabsl.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_fabsl.S
new file mode 100644
index 000000000..3655e5b2f
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_fabsl.S
@@ -0,0 +1,36 @@
+/* Absolute value of a floating-point value.
+ IBM extended format long double version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ENTRY(__fabsl)
+/* long double [f1,f2] fabs (long double [f1,f2] x);
+ fabsl(x) returns the absolute value of x; the high double is made
+ non-negative and the low double is negated whenever the sign changes. */
+ fmr fp0,fp1
+ fabs fp1,fp1
+ fcmpu cr1,fp0,fp1
+ beqlr cr1
+ fneg fp2,fp2
+ blr
+END (__fabsl)
+
+long_double_symbol (libm, __fabsl, fabsl)
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_fdim.c b/libc/sysdeps/powerpc/powerpc32/fpu/s_fdim.c
new file mode 100644
index 000000000..e34b51ee5
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_fdim.c
@@ -0,0 +1,5 @@
+#include <math_ldbl_opt.h>
+#include <sysdeps/powerpc/fpu/s_fdim.c>
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __fdim, fdiml, GLIBC_2_1);
+#endif
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_floor.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_floor.S
new file mode 100644
index 000000000..a29e4791e
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_floor.S
@@ -0,0 +1,83 @@
+/* Floor function. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
+.LC0: /* 2**52 */
+ .long 0x59800000
+
+ .section ".text"
+ENTRY (__floor)
+ mffs fp11 /* Save current FPU rounding mode. */
+#ifdef SHARED
+ mflr r11
+ cfi_register(lr,r11)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ lfs fp13,.LC0-1b@l(r9)
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r10
+ lwz r9,.LC0@got(10)
+ lfs fp13,0(r9)
+# endif
+ mtlr r11
+ cfi_same_value (lr)
+#else
+ lis r9,.LC0@ha
+ lfs fp13,.LC0@l(r9)
+#endif
+ fabs fp0,fp1
+ fsub fp12,fp13,fp13 /* generate 0.0 */
+ fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
+ bnllr- cr7
+ mtfsfi 7,3 /* Set rounding mode toward -inf. */
+ ble- cr6,.L4
+ fadd fp1,fp1,fp13 /* x+= TWO52; */
+ fsub fp1,fp1,fp13 /* x-= TWO52; */
+ fabs fp1,fp1 /* if (x == 0.0) */
+ /* x = 0.0; */
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+.L4:
+ bge- cr6,.L9 /* if (x < 0.0) */
+ fsub fp1,fp1,fp13 /* x-= TWO52; */
+ fadd fp1,fp1,fp13 /* x+= TWO52; */
+ fnabs fp1,fp1 /* if (x == 0.0) */
+ /* x = -0.0; */
+.L9:
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+ END (__floor)
+
+weak_alias (__floor, floor)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__floor, floorl)
+strong_alias (__floor, __floorl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __floor, floorl, GLIBC_2_0)
+#endif
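A hedged C sketch of the algorithm __floor uses (illustrative only; writing this in C is exactly what the assembly avoids, and a faithful C version would also need #pragma STDC FENV_ACCESS ON): for |x| < 2^52, adding and then subtracting 2^52 under round-toward-minus-infinity discards the fractional bits downward, and the final fabs/fnabs fixes the sign of a zero result.

    #include <fenv.h>
    #include <math.h>

    static double floor_sketch (double x)
    {
      const double two52 = 0x1p52;
      if (!(fabs (x) < two52))        /* NaN or already integral */
        return x;
      int saved = fegetround ();
      fesetround (FE_DOWNWARD);
      volatile double t;              /* keep the adds from being folded */
      if (x > 0.0)
        {
          t = x + two52;              /* fraction rounded away, downward */
          t = t - two52;
          t = fabs (t);               /* floor of 0 < x < 1 is +0.0      */
        }
      else if (x < 0.0)
        {
          t = x - two52;
          t = t + two52;
          t = -fabs (t);              /* mirrors the fnabs above         */
        }
      else
        t = x;                        /* +-0.0 passes through unchanged  */
      fesetround (saved);
      return t;
    }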
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_floorf.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
new file mode 100644
index 000000000..99fbdc5f8
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
@@ -0,0 +1,75 @@
+/* float Floor function. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
+.LC0: /* 2**23 */
+ .long 0x4b000000
+
+ .section ".text"
+ENTRY (__floorf)
+ mffs fp11 /* Save current FPU rounding mode. */
+#ifdef SHARED
+ mflr r11
+ cfi_register(lr,r11)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ lfs fp13,.LC0-1b@l(r9)
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r10
+ lwz r9,.LC0@got(10)
+ lfs fp13,0(r9)
+# endif
+ mtlr r11
+ cfi_same_value (lr)
+#else
+ lis r9,.LC0@ha
+ lfs fp13,.LC0@l(r9)
+#endif
+ fabs fp0,fp1
+ fsubs fp12,fp13,fp13 /* generate 0.0 */
+ fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO23) */
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
+ bnllr- cr7
+ mtfsfi 7,3 /* Set rounding mode toward -inf. */
+ ble- cr6,.L4
+ fadds fp1,fp1,fp13 /* x+= TWO23; */
+ fsubs fp1,fp1,fp13 /* x-= TWO23; */
+ fabs fp1,fp1 /* if (x == 0.0) */
+ /* x = 0.0; */
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+.L4:
+ bge- cr6,.L9 /* if (x < 0.0) */
+ fsubs fp1,fp1,fp13 /* x-= TWO23; */
+ fadds fp1,fp1,fp13 /* x+= TWO23; */
+ fnabs fp1,fp1 /* if (x == 0.0) */
+ /* x = -0.0; */
+.L9:
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+ END (__floorf)
+
+weak_alias (__floorf, floorf)
+
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_fmax.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_fmax.S
new file mode 100644
index 000000000..69735761a
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_fmax.S
@@ -0,0 +1,5 @@
+#include <math_ldbl_opt.h>
+#include <sysdeps/powerpc/fpu/s_fmax.S>
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __fmax, fmaxl, GLIBC_2_1)
+#endif
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_fmin.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_fmin.S
new file mode 100644
index 000000000..6d4a0a946
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_fmin.S
@@ -0,0 +1,5 @@
+#include <math_ldbl_opt.h>
+#include <sysdeps/powerpc/fpu/s_fmin.S>
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __fmin, fminl, GLIBC_2_1)
+#endif
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_isnan.c b/libc/sysdeps/powerpc/powerpc32/fpu/s_isnan.c
new file mode 100644
index 000000000..397717ba9
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_isnan.c
@@ -0,0 +1,7 @@
+#include <sysdeps/powerpc/fpu/s_isnan.c>
+#ifndef IS_IN_libm
+# if LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+compat_symbol (libc, __isnan, __isnanl, GLIBC_2_0);
+compat_symbol (libc, isnan, isnanl, GLIBC_2_0);
+# endif
+#endif
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_llrint.c b/libc/sysdeps/powerpc/powerpc32/fpu/s_llrint.c
new file mode 100644
index 000000000..cb96be7c9
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_llrint.c
@@ -0,0 +1,35 @@
+/* Round a double value to a long long in the current rounding mode.
+ Copyright (C) 1997, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+
+long long int
+__llrint (double x)
+{
+ return (long long int) __rint (x);
+}
+weak_alias (__llrint, llrint)
+#ifdef NO_LONG_DOUBLE
+strong_alias (__llrint, __llrintl)
+weak_alias (__llrint, llrintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __llrint, llrintl, GLIBC_2_1);
+#endif
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_llrintf.c b/libc/sysdeps/powerpc/powerpc32/fpu/s_llrintf.c
new file mode 100644
index 000000000..0439e4558
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_llrintf.c
@@ -0,0 +1,27 @@
+/* Round a float value to a long long in the current rounding mode.
+ Copyright (C) 1997 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include "math.h"
+
+long long int
+__llrintf (float x)
+{
+ return (long long int) __rintf (x);
+}
+weak_alias (__llrintf, llrintf)
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_lrint.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_lrint.S
new file mode 100644
index 000000000..55e9de7e2
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_lrint.S
@@ -0,0 +1,45 @@
+/* Round double to long int. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* long int[r3] __lrint (double x[fp1]) */
+ENTRY (__lrint)
+ fctiw fp13,fp1
+ stfd fp13,-8(r1)
+	nop	/* Ensure the following load is in a different dispatch group */
+ nop /* to avoid pipe stall on POWER4&5. */
+ nop
+ lwz r3,-4(r1)
+ blr
+ END (__lrint)
+
+weak_alias (__lrint, lrint)
+
+strong_alias (__lrint, __lrintf)
+weak_alias (__lrint, lrintf)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__lrint, __lrintl)
+weak_alias (__lrint, lrintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __lrint, lrintl, GLIBC_2_1)
+#endif
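A rough C equivalent of the sequence above (a sketch, not the glibc code, and ignoring out-of-range saturation): fctiw converts using the current rounding mode and stores a 64-bit image whose low 32 bits, at offset -4 on big-endian powerpc32, hold the integer result.

    #include <math.h>

    static long lrint_sketch (double x)
    {
      return (long) rint (x);   /* rint honours the current rounding mode */
    }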
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_lround.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_lround.S
new file mode 100644
index 000000000..9c534ec2b
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_lround.S
@@ -0,0 +1,99 @@
+/* lround function. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 1 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ .section .rodata.cst8,"aM",@progbits,8
+ .align 2
+.LC0: /* 0.0 */
+ .long 0x00000000
+.LC1: /* 0.5 */
+ .long 0x3f000000
+
+ .section ".text"
+
+/* long [r3] lround (double x [fp1])
+   IEEE 1003.1 lround function.  IEEE specifies "round to the nearest
+   integer value, rounding halfway cases away from zero, regardless of
+   the current rounding mode."  However the PowerPC Architecture defines
+   "round to Nearest" as "Choose the best approximation.  In case of a
+   tie, choose the one that is even (least significant bit 0)."
+   So we can't use the PowerPC "round to Nearest" mode.  Instead we
+   add +-0.5 and then truncate toward zero (fctiwz) to get the
+   integer value.  */
+
+ENTRY (__lround)
+ stwu r1,-16(r1)
+ cfi_adjust_cfa_offset (16)
+#ifdef SHARED
+ mflr r11
+ cfi_register(lr,r11)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ addi r9,r9,.LC0-1b@l
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r10
+ lwz r9,.LC0@got(10)
+# endif
+ mtlr r11
+ cfi_same_value (lr)
+ lfs fp12,0(r9)
+#else
+ lis r9,.LC0@ha
+ lfs fp12,.LC0@l(r9)
+#endif
+#ifdef SHARED
+ lfs fp10,.LC1-.LC0(r9)
+#else
+ lis r9,.LC1@ha
+ lfs fp10,.LC1@l(r9)
+#endif
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
+ ble- cr6,.L4
+ fadd fp1,fp1,fp10 /* x+= 0.5; */
+.L9:
+ fctiwz fp2,fp1 /* Convert To Integer DW lround toward 0. */
+ stfd fp2,8(r1)
+ nop /* Ensure the following load is in a different dispatch */
+ nop /* group to avoid pipe stall on POWER4&5. */
+ nop
+ lwz r3,12(r1)
+ addi r1,r1,16
+ blr
+.L4:
+ fsub fp1,fp1,fp10 /* x-= 0.5; */
+ b .L9
+ END (__lround)
+
+weak_alias (__lround, lround)
+
+strong_alias (__lround, __lroundf)
+weak_alias (__lround, lroundf)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__lround, lroundl)
+strong_alias (__lround, __lroundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __lround, lroundl, GLIBC_2_1)
+#endif
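A short C sketch of the approach described in the comment above (it mirrors the assembly, including the out-of-range behaviour being unspecified): the halfway-away-from-zero result comes from adding +-0.5 and then truncating toward zero.

    #include <math.h>

    static long lround_sketch (double x)
    {
      if (x > 0.0)
        return (long) trunc (x + 0.5);   /* fctiwz truncates toward zero */
      else
        return (long) trunc (x - 0.5);
    }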
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_lroundf.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_lroundf.S
new file mode 100644
index 000000000..e3c992d77
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_lroundf.S
@@ -0,0 +1,2 @@
+/* __lroundf is in s_lround.S */
+
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_rint.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_rint.S
new file mode 100644
index 000000000..c8dca313a
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_rint.S
@@ -0,0 +1,79 @@
+/* Round to int floating-point values. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 1 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+/* This has been coded in assembler because GCC makes such a mess of it
+ when it's coded in C. */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
+.LC0: /* 2**52 */
+ .long 0x59800000
+
+ .section ".text"
+ENTRY (__rint)
+#ifdef SHARED
+ mflr r11
+ cfi_register(lr,r11)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ lfs fp13,.LC0-1b@l(r9)
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r10
+ lwz r9,.LC0@got(10)
+ lfs fp13,0(r9)
+# endif
+ mtlr r11
+ cfi_same_value (lr)
+#else
+ lis r9,.LC0@ha
+ lfs fp13,.LC0@l(r9)
+#endif
+ fabs fp0,fp1
+ fsub fp12,fp13,fp13 /* generate 0.0 */
+ fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
+ bnllr- cr7
+ bng- cr6,.L4
+ fadd fp1,fp1,fp13 /* x+= TWO52; */
+ fsub fp1,fp1,fp13 /* x-= TWO52; */
+ fabs fp1,fp1 /* if (x == 0.0) */
+ blr /* x = 0.0; */
+.L4:
+ bnllr- cr6 /* if (x < 0.0) */
+ fsub fp1,fp1,fp13 /* x-= TWO52; */
+ fadd fp1,fp1,fp13 /* x+= TWO52; */
+ fnabs fp1,fp1 /* if (x == 0.0) */
+ blr /* x = -0.0; */
+ END (__rint)
+
+weak_alias (__rint, rint)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__rint, rintl)
+strong_alias (__rint, __rintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __rint, rintl, GLIBC_2_0)
+#endif
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_rintf.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
new file mode 100644
index 000000000..7771cb2bc
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
@@ -0,0 +1,68 @@
+/* Round float to int floating-point values. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 1 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
+.LC0: /* 2**23 */
+ .long 0x4b000000
+
+ .section ".text"
+ENTRY (__rintf)
+#ifdef SHARED
+ mflr r11
+ cfi_register(lr,r11)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ lfs fp13,.LC0-1b@l(r9)
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r10
+ lwz r9,.LC0@got(10)
+ lfs fp13,0(r9)
+# endif
+ mtlr r11
+ cfi_same_value (lr)
+#else
+ lis r9,.LC0@ha
+ lfs fp13,.LC0@l(r9)
+#endif
+ fabs fp0,fp1
+ fsubs fp12,fp13,fp13 /* generate 0.0 */
+ fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO23) */
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
+ bnllr- cr7
+ bng- cr6,.L4
+ fadds fp1,fp1,fp13 /* x+= TWO23; */
+ fsubs fp1,fp1,fp13 /* x-= TWO23; */
+ fabs fp1,fp1 /* if (x == 0.0) */
+ blr /* x = 0.0; */
+.L4:
+ bnllr- cr6 /* if (x < 0.0) */
+ fsubs fp1,fp1,fp13 /* x-= TWO23; */
+ fadds fp1,fp1,fp13 /* x+= TWO23; */
+ fnabs fp1,fp1 /* if (x == 0.0) */
+ blr /* x = -0.0; */
+ END (__rintf)
+
+weak_alias (__rintf, rintf)
+
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_round.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_round.S
new file mode 100644
index 000000000..590c87ad8
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_round.S
@@ -0,0 +1,103 @@
+/* round function. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 1 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ .section .rodata.cst8,"aM",@progbits,8
+ .align 2
+.LC0: /* 2**52 */
+ .long 0x59800000
+.LC1: /* 0.5 */
+ .long 0x3f000000
+
+/* double [fp1] round (double x [fp1])
+ IEEE 1003.1 round function. IEEE specifies "round to the nearest
+ integer value, rounding halfway cases away from zero, regardless of
+ the current rounding mode." However PowerPC Architecture defines
+ "Round to Nearest" as "Choose the best approximation. In case of a
+   tie, choose the one that is even (least significant bit 0).".
+ So we can't use the PowerPC "Round to Nearest" mode. Instead we set
+ "Round toward Zero" mode and round by adding +-0.5 before rounding
+ to the integer value. */
+
+ .section ".text"
+ENTRY (__round)
+ mffs fp11 /* Save current FPU rounding mode. */
+#ifdef SHARED
+ mflr r11
+ cfi_register(lr,r11)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ addi r9,r9,.LC0-1b@l
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r10
+ lwz r9,.LC0@got(10)
+# endif
+ mtlr r11
+ cfi_same_value (lr)
+ lfs fp13,0(r9)
+#else
+ lis r9,.LC0@ha
+ lfs fp13,.LC0@l(r9)
+#endif
+ fabs fp0,fp1
+ fsub fp12,fp13,fp13 /* generate 0.0 */
+ fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
+ bnllr- cr7
+ mtfsfi 7,1 /* Set rounding mode toward 0. */
+#ifdef SHARED
+ lfs fp10,.LC1-.LC0(r9)
+#else
+ lis r9,.LC1@ha
+ lfs fp10,.LC1@l(r9)
+#endif
+ ble- cr6,.L4
+ fadd fp1,fp1,fp10 /* x+= 0.5; */
+ fadd fp1,fp1,fp13 /* x+= TWO52; */
+ fsub fp1,fp1,fp13 /* x-= TWO52; */
+ fabs fp1,fp1 /* if (x == 0.0) */
+ /* x = 0.0; */
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+.L4:
+	fsub	fp9,fp1,fp10	/* x-= 0.5; */
+ bge- cr6,.L9 /* if (x < 0.0) */
+ fsub fp1,fp9,fp13 /* x-= TWO52; */
+ fadd fp1,fp1,fp13 /* x+= TWO52; */
+ fnabs fp1,fp1 /* if (x == 0.0) */
+ /* x = -0.0; */
+.L9:
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+ END (__round)
+
+weak_alias (__round, round)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__round, roundl)
+strong_alias (__round, __roundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __round, roundl, GLIBC_2_1)
+#endif
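And a hedged C sketch of __round's variant of the same trick (illustrative; a faithful C version would also need FENV_ACCESS): under round-toward-zero, add +-0.5 and then add and subtract +-2^52 to chop the biased value to an integer.

    #include <fenv.h>
    #include <math.h>

    static double round_sketch (double x)
    {
      const double two52 = 0x1p52;
      if (!(fabs (x) < two52))          /* NaN or already integral */
        return x;
      int saved = fegetround ();
      fesetround (FE_TOWARDZERO);
      volatile double t;                /* keep the adds from being folded */
      if (x > 0.0)
        {
          t = x + 0.5;
          t = t + two52;                /* truncate the fraction...   */
          t = t - two52;                /* ...and recover the integer */
          t = fabs (t);
        }
      else if (x < 0.0)
        {
          t = x - 0.5;
          t = t - two52;
          t = t + two52;
          t = -fabs (t);                /* e.g. round(-0.3) is -0.0   */
        }
      else
        t = x;                          /* +-0.0 unchanged            */
      fesetround (saved);
      return t;
    }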
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_roundf.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
new file mode 100644
index 000000000..7e99bca31
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
@@ -0,0 +1,95 @@
+/* roundf function. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 1 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+
+ .section .rodata.cst8,"aM",@progbits,8
+ .align 2
+.LC0: /* 2**23 */
+ .long 0x4b000000
+.LC1: /* 0.5 */
+ .long 0x3f000000
+
+/* float [fp1] roundf (float x [fp1])
+ IEEE 1003.1 round function. IEEE specifies "round to the nearest
+ integer value, rounding halfway cases away from zero, regardless of
+ the current rounding mode." However PowerPC Architecture defines
+ "Round to Nearest" as "Choose the best approximation. In case of a
+   tie, choose the one that is even (least significant bit 0).".
+ So we can't use the PowerPC "Round to Nearest" mode. Instead we set
+ "Round toward Zero" mode and round by adding +-0.5 before rounding
+ to the integer value. */
+
+ .section ".text"
+ENTRY (__roundf)
+ mffs fp11 /* Save current FPU rounding mode. */
+#ifdef SHARED
+ mflr r11
+ cfi_register(lr,r11)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ addi r9,r9,.LC0-1b@l
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r10
+ lwz r9,.LC0@got(10)
+# endif
+ mtlr r11
+ cfi_same_value (lr)
+ lfs fp13,0(r9)
+#else
+ lis r9,.LC0@ha
+ lfs fp13,.LC0@l(r9)
+#endif
+ fabs fp0,fp1
+ fsubs fp12,fp13,fp13 /* generate 0.0 */
+ fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO23) */
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
+ bnllr- cr7
+ mtfsfi 7,1 /* Set rounding mode toward 0. */
+#ifdef SHARED
+ lfs fp10,.LC1-.LC0(r9)
+#else
+ lis r9,.LC1@ha
+ lfs fp10,.LC1@l(r9)
+#endif
+ ble- cr6,.L4
+ fadds fp1,fp1,fp10 /* x+= 0.5; */
+ fadds fp1,fp1,fp13 /* x+= TWO23; */
+ fsubs fp1,fp1,fp13 /* x-= TWO23; */
+ fabs fp1,fp1 /* if (x == 0.0) */
+ /* x = 0.0; */
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+.L4:
+	fsubs	fp9,fp1,fp10	/* x-= 0.5; */
+ bge- cr6,.L9 /* if (x < 0.0) */
+ fsubs fp1,fp9,fp13 /* x-= TWO23; */
+ fadds fp1,fp1,fp13 /* x+= TWO23; */
+ fnabs fp1,fp1 /* if (x == 0.0) */
+ /* x = -0.0; */
+.L9:
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+ END (__roundf)
+
+weak_alias (__roundf, roundf)
+
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_trunc.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
new file mode 100644
index 000000000..5bc0856b9
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
@@ -0,0 +1,90 @@
+/* trunc function. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 1 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
+.LC0: /* 2**52 */
+ .long 0x59800000
+
+/* double [fp1] trunc (double x [fp1])
+ IEEE 1003.1 trunc function. IEEE specifies "trunc to the integer
+ value, in floating format, nearest to but no larger in magnitude
+   than the argument."
+ We set "round toward Zero" mode and trunc by adding +-2**52 then
+ subtracting +-2**52. */
+
+ .section ".text"
+ENTRY (__trunc)
+ mffs fp11 /* Save current FPU rounding mode. */
+#ifdef SHARED
+ mflr r11
+ cfi_register(lr,r11)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ lfs fp13,.LC0-1b@l(r9)
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r10
+ lwz r9,.LC0@got(10)
+ lfs fp13,0(r9)
+# endif
+ mtlr r11
+ cfi_same_value (lr)
+#else
+ lis r9,.LC0@ha
+ lfs fp13,.LC0@l(r9)
+#endif
+ fabs fp0,fp1
+ fsub fp12,fp13,fp13 /* generate 0.0 */
+ fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO52) */
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
+ bnllr- cr7
+ mtfsfi 7,1 /* Set rounding toward 0 mode. */
+ ble- cr6,.L4
+ fadd fp1,fp1,fp13 /* x+= TWO52; */
+ fsub fp1,fp1,fp13 /* x-= TWO52; */
+ fabs fp1,fp1 /* if (x == 0.0) */
+ /* x = 0.0; */
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+.L4:
+ bge- cr6,.L9 /* if (x < 0.0) */
+ fsub fp1,fp1,fp13 /* x-= TWO52; */
+ fadd fp1,fp1,fp13 /* x+= TWO52; */
+ fnabs fp1,fp1 /* if (x == 0.0) */
+ /* x = -0.0; */
+.L9:
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+ END (__trunc)
+
+weak_alias (__trunc, trunc)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__trunc, truncl)
+strong_alias (__trunc, __truncl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __trunc, truncl, GLIBC_2_1)
+#endif
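As a quick worked example of the method in the comment above: for x = -2.75 under round-toward-zero, x - 2^52 gives -(2^52 + 2.75), which rounds to the smaller-magnitude neighbour -(2^52 + 2); adding 2^52 back yields -2.0, and the final fnabs leaves the expected trunc(-2.75) = -2.0.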
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/s_truncf.S b/libc/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
new file mode 100644
index 000000000..e2e3bd674
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
@@ -0,0 +1,82 @@
+/* truncf function. PowerPC32 version.
+ Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 1 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+
+ .section .rodata.cst4,"aM",@progbits,4
+ .align 2
+.LC0: /* 2**23 */
+ .long 0x4b000000
+
+/* float [fp1] truncf (float x [fp1])
+ IEEE 1003.1 trunc function. IEEE specifies "trunc to the integer
+ value, in floating format, nearest to but no larger in magnitude
+   than the argument."
+ We set "round toward Zero" mode and trunc by adding +-2**23 then
+ subtracting +-2**23. */
+
+ .section ".text"
+ENTRY (__truncf)
+ mffs fp11 /* Save current FPU rounding mode. */
+#ifdef SHARED
+ mflr r11
+ cfi_register(lr,r11)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r9
+ addis r9,r9,.LC0-1b@ha
+ lfs fp13,.LC0-1b@l(r9)
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r10
+ lwz r9,.LC0@got(10)
+ lfs fp13,0(r9)
+# endif
+ mtlr r11
+ cfi_same_value (lr)
+#else
+ lis r9,.LC0@ha
+ lfs fp13,.LC0@l(r9)
+#endif
+ fabs fp0,fp1
+ fsubs fp12,fp13,fp13 /* generate 0.0 */
+ fcmpu cr7,fp0,fp13 /* if (fabs(x) > TWO23) */
+ fcmpu cr6,fp1,fp12 /* if (x > 0.0) */
+ bnllr- cr7
+ mtfsfi 7,1 /* Set rounding toward 0 mode. */
+ ble- cr6,.L4
+ fadds fp1,fp1,fp13 /* x+= TWO23; */
+ fsubs fp1,fp1,fp13 /* x-= TWO23; */
+ fabs fp1,fp1 /* if (x == 0.0) */
+ /* x = 0.0; */
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+.L4:
+ bge- cr6,.L9 /* if (x < 0.0) */
+ fsubs fp1,fp1,fp13 /* x-= TWO23; */
+ fadds fp1,fp1,fp13 /* x+= TWO23; */
+ fnabs fp1,fp1 /* if (x == 0.0) */
+ /* x = -0.0; */
+.L9:
+ mtfsf 0x01,fp11 /* restore previous rounding mode. */
+ blr
+ END (__truncf)
+
+weak_alias (__truncf, truncf)
+
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S b/libc/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
new file mode 100644
index 000000000..851480d2e
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
@@ -0,0 +1,184 @@
+/* setjmp for PowerPC.
+ Copyright (C) 1995-2000, 2003-2005, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 1 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#define _ASM
+#ifdef __NO_VMX__
+# include <novmxsetjmp.h>
+#else
+# include <jmpbuf-offsets.h>
+#endif
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+ .machine "altivec"
+ENTRY (BP_SYM (__sigsetjmp))
+ CHECK_BOUNDS_BOTH_WIDE_LIT (r3, r8, r9, JB_SIZE)
+
+#ifdef PTR_MANGLE
+ mr r5,r1
+ PTR_MANGLE(r5, r6)
+ stw r5,(JB_GPR1*4)(3)
+#else
+ stw r1,(JB_GPR1*4)(3)
+#endif
+ mflr r0
+ stw r14,((JB_GPRS+0)*4)(3)
+ stfd fp14,((JB_FPRS+0*2)*4)(3)
+#ifdef PTR_MANGLE
+ PTR_MANGLE2 (r0, r6)
+#endif
+ stw r0,(JB_LR*4)(3)
+ stw r15,((JB_GPRS+1)*4)(3)
+ stfd fp15,((JB_FPRS+1*2)*4)(3)
+ mfcr r0
+ stw r16,((JB_GPRS+2)*4)(3)
+ stfd fp16,((JB_FPRS+2*2)*4)(3)
+ stw r0,(JB_CR*4)(3)
+ stw r17,((JB_GPRS+3)*4)(3)
+ stfd fp17,((JB_FPRS+3*2)*4)(3)
+ stw r18,((JB_GPRS+4)*4)(3)
+ stfd fp18,((JB_FPRS+4*2)*4)(3)
+ stw r19,((JB_GPRS+5)*4)(3)
+ stfd fp19,((JB_FPRS+5*2)*4)(3)
+ stw r20,((JB_GPRS+6)*4)(3)
+ stfd fp20,((JB_FPRS+6*2)*4)(3)
+ stw r21,((JB_GPRS+7)*4)(3)
+ stfd fp21,((JB_FPRS+7*2)*4)(3)
+ stw r22,((JB_GPRS+8)*4)(3)
+ stfd fp22,((JB_FPRS+8*2)*4)(3)
+ stw r23,((JB_GPRS+9)*4)(3)
+ stfd fp23,((JB_FPRS+9*2)*4)(3)
+ stw r24,((JB_GPRS+10)*4)(3)
+ stfd fp24,((JB_FPRS+10*2)*4)(3)
+ stw r25,((JB_GPRS+11)*4)(3)
+ stfd fp25,((JB_FPRS+11*2)*4)(3)
+ stw r26,((JB_GPRS+12)*4)(3)
+ stfd fp26,((JB_FPRS+12*2)*4)(3)
+ stw r27,((JB_GPRS+13)*4)(3)
+ stfd fp27,((JB_FPRS+13*2)*4)(3)
+ stw r28,((JB_GPRS+14)*4)(3)
+ stfd fp28,((JB_FPRS+14*2)*4)(3)
+ stw r29,((JB_GPRS+15)*4)(3)
+ stfd fp29,((JB_FPRS+15*2)*4)(3)
+ stw r30,((JB_GPRS+16)*4)(3)
+ stfd fp30,((JB_FPRS+16*2)*4)(3)
+ stw r31,((JB_GPRS+17)*4)(3)
+ stfd fp31,((JB_FPRS+17*2)*4)(3)
+#ifndef __NO_VMX__
+# ifdef PIC
+ mflr r6
+ cfi_register(lr,r6)
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr r5
+ addis r5,r5,_GLOBAL_OFFSET_TABLE_-1b@ha
+ addi r5,r5,_GLOBAL_OFFSET_TABLE_-1b@l
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr r5
+# endif
+ mtlr r6
+ cfi_same_value (lr)
+# ifdef SHARED
+ lwz r5,_rtld_global_ro@got(r5)
+ lwz r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
+# else
+ lwz r5,_dl_hwcap@got(r5)
+ lwz r5,0(r5)
+# endif
+# else
+ lis r6,_dl_hwcap@ha
+ lwz r5,_dl_hwcap@l(r6)
+# endif
+ andis. r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
+ beq L(no_vmx)
+ la r5,((JB_VRS)*4)(3)
+ andi. r6,r5,0xf
+ mfspr r0,VRSAVE
+ stw r0,((JB_VRSAVE)*4)(3)
+ addi r6,r5,16
+ beq+ L(aligned_save_vmx)
+ lvsr v0,0,r5
+ vspltisb v1,-1 /* set v1 to all 1's */
+ vspltisb v2,0 /* set v2 to all 0's */
+	vperm	v3,v2,v1,v0   /* v3 is the select mask: one all-1s byte per byte of misalignment, left-aligned */
+
+
+	/* Special case for v20: we need to preserve what is in the save area below v20 before obliterating it.  */
+ lvx v5,0,r5
+ vperm v20,v20,v20,v0
+ vsel v5,v5,v20,v3
+ vsel v20,v20,v2,v3
+ stvx v5,0,r5
+
+#define save_2vmx_partial(savevr,prev_savevr,hivr,shiftvr,maskvr,savegpr,addgpr) \
+ addi addgpr,addgpr,32; \
+ vperm savevr,savevr,savevr,shiftvr; \
+ vsel hivr,prev_savevr,savevr,maskvr; \
+ stvx hivr,0,savegpr;
+
+ save_2vmx_partial(v21,v20,v5,v0,v3,r6,r5)
+ save_2vmx_partial(v22,v21,v5,v0,v3,r5,r6)
+ save_2vmx_partial(v23,v22,v5,v0,v3,r6,r5)
+ save_2vmx_partial(v24,v23,v5,v0,v3,r5,r6)
+ save_2vmx_partial(v25,v24,v5,v0,v3,r6,r5)
+ save_2vmx_partial(v26,v25,v5,v0,v3,r5,r6)
+ save_2vmx_partial(v27,v26,v5,v0,v3,r6,r5)
+ save_2vmx_partial(v28,v27,v5,v0,v3,r5,r6)
+ save_2vmx_partial(v29,v28,v5,v0,v3,r6,r5)
+ save_2vmx_partial(v30,v29,v5,v0,v3,r5,r6)
+
+	/* Special case for v31: we need to preserve what is in the save area above v31 before obliterating it.  */
+ addi r5,r5,32
+ vperm v31,v31,v31,v0
+ lvx v4,0,r5
+ vsel v5,v30,v31,v3
+ stvx v5,0,r6
+ vsel v4,v31,v4,v3
+ stvx v4,0,r5
+ b L(no_vmx)
+
+L(aligned_save_vmx):
+ stvx 20,0,r5
+ addi r5,r5,32
+ stvx 21,0,r6
+ addi r6,r6,32
+ stvx 22,0,r5
+ addi r5,r5,32
+ stvx 23,0,r6
+ addi r6,r6,32
+ stvx 24,0,r5
+ addi r5,r5,32
+ stvx 25,0,r6
+ addi r6,r6,32
+ stvx 26,0,r5
+ addi r5,r5,32
+ stvx 27,0,r6
+ addi r6,r6,32
+ stvx 28,0,r5
+ addi r5,r5,32
+ stvx 29,0,r6
+ addi r6,r6,32
+ stvx 30,0,r5
+ stvx 31,0,r6
+L(no_vmx):
+#endif
+ b BP_SYM (__sigjmp_save@local)
+END (BP_SYM (__sigsetjmp))
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/setjmp.S b/libc/sysdeps/powerpc/powerpc32/fpu/setjmp.S
new file mode 100644
index 000000000..139611a9b
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/setjmp.S
@@ -0,0 +1,45 @@
+/* Non-AltiVec (old) version of setjmp for PowerPC.
+ Copyright (C) 1995-1997,1999,2000,2003,2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <libc-symbols.h>
+#include <rtld-global-offsets.h>
+#include <shlib-compat.h>
+
+#if defined NOT_IN_libc
+/* Build a non-versioned object for rtld-*. */
+# include "setjmp-common.S"
+
+#else /* !NOT_IN_libc */
+/* Build a versioned object for libc. */
+default_symbol_version (__vmx__sigsetjmp,__sigsetjmp,GLIBC_2.3.4)
+# define __sigsetjmp __vmx__sigsetjmp
+# define __sigjmp_save __vmx__sigjmp_save
+# include "setjmp-common.S"
+
+# if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+# define __NO_VMX__
+# undef __sigsetjmp
+# undef __sigjmp_save
+# undef JB_SIZE
+symbol_version (__novmx__sigsetjmp,__sigsetjmp,GLIBC_2.0)
+# define __sigsetjmp __novmx__sigsetjmp
+# define __sigjmp_save __novmx__sigjmp_save
+# include "setjmp-common.S"
+# endif
+#endif /* !NOT_IN_libc */
diff --git a/libc/sysdeps/powerpc/powerpc32/gprrest0.S b/libc/sysdeps/powerpc/powerpc32/gprrest0.S
new file mode 100644
index 000000000..90eb4a0c4
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/gprrest0.S
@@ -0,0 +1,70 @@
+/* Copyright (C) 2000, 2001, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+/*
+ General Purpose Register (GPR) restore routine
+ when Floating Point Registers (FPRs) are not saved
+
+ Note: This restore routine must not be called when GPR30 or
+   GPR31, or both, are the only registers being saved.  In these
+ cases, the saving and restoring must be done inline.
+*/
+
+#include <sysdep.h>
+
+ENTRY(_restgpr0_all)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_13)
+C_TEXT(_restgpr0_13): lwz r13,-76(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_14)
+C_TEXT(_restgpr0_14): lwz r14,-72(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_15)
+C_TEXT(_restgpr0_15): lwz r15,-68(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_16)
+C_TEXT(_restgpr0_16): lwz r16,-64(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_17)
+C_TEXT(_restgpr0_17): lwz r17,-60(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_18)
+C_TEXT(_restgpr0_18): lwz r18,-56(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_19)
+C_TEXT(_restgpr0_19): lwz r19,-52(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_20)
+C_TEXT(_restgpr0_20): lwz r20,-48(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_21)
+C_TEXT(_restgpr0_21): lwz r21,-44(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_22)
+C_TEXT(_restgpr0_22): lwz r22,-40(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_23)
+C_TEXT(_restgpr0_23): lwz r23,-36(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_24)
+C_TEXT(_restgpr0_24): lwz r24,-32(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_25)
+C_TEXT(_restgpr0_25): lwz r25,-28(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_26)
+C_TEXT(_restgpr0_26): lwz r26,-24(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_27)
+C_TEXT(_restgpr0_27): lwz r27,-20(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_28)
+C_TEXT(_restgpr0_28): lwz r28,-16(r1)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr0_29)
+C_TEXT(_restgpr0_29): lwz r0,8(r1) #get return address from frame
+ lwz r29,-12(r1) #restore r29
+ mtlr r0 #move return address to LR
+ lwz r30,-8(r1) #restore r30
+ lwz r31,-4(r1) #restore r31
+ blr #return
+END (_restgpr0_all)
diff --git a/libc/sysdeps/powerpc/powerpc32/gprrest1.S b/libc/sysdeps/powerpc/powerpc32/gprrest1.S
new file mode 100644
index 000000000..ca00b8f13
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/gprrest1.S
@@ -0,0 +1,64 @@
+/* Copyright (C) 2000, 2001, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+/*
+ General Purpose Register (GPR) restore routine
+ when Floating Point Registers (FPRs) are saved
+*/
+
+#include <sysdep.h>
+
+ENTRY(_restgpr1_all)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_13)
+C_TEXT(_restgpr1_13): lwz r13,-76(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_14)
+C_TEXT(_restgpr1_14): lwz r14,-72(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_15)
+C_TEXT(_restgpr1_15): lwz r15,-68(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_16)
+C_TEXT(_restgpr1_16): lwz r16,-64(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_17)
+C_TEXT(_restgpr1_17): lwz r17,-60(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_18)
+C_TEXT(_restgpr1_18): lwz r18,-56(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_19)
+C_TEXT(_restgpr1_19): lwz r19,-52(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_20)
+C_TEXT(_restgpr1_20): lwz r20,-48(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_21)
+C_TEXT(_restgpr1_21): lwz r21,-44(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_22)
+C_TEXT(_restgpr1_22): lwz r22,-40(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_23)
+C_TEXT(_restgpr1_23): lwz r23,-36(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_24)
+C_TEXT(_restgpr1_24): lwz r24,-32(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_25)
+C_TEXT(_restgpr1_25): lwz r25,-28(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_26)
+C_TEXT(_restgpr1_26): lwz r26,-24(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_27)
+C_TEXT(_restgpr1_27): lwz r27,-20(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_28)
+C_TEXT(_restgpr1_28): lwz r28,-16(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_restgpr1_29)
+C_TEXT(_restgpr1_29): lwz r29,-12(r12) #restore r29
+ lwz r30,-8(r12) #restore r30
+ lwz r31,-4(r12) #restore r31
+ blr #return
+END (_restgpr1_all)
diff --git a/libc/sysdeps/powerpc/powerpc32/gprsave0.S b/libc/sysdeps/powerpc/powerpc32/gprsave0.S
new file mode 100644
index 000000000..c74272b56
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/gprsave0.S
@@ -0,0 +1,88 @@
+/* Copyright (C) 2000, 2001, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+/*
+ General Purpose Register (GPR) save routine
+ when Floating Point Registers (FPRs) are not saved
+
+ Note: This save routine must not be called when GPR30 or
+   GPR31, or both, are the only registers being saved.  In these
+ cases, the saving and restoring must be done inline.
+*/
+
+#include <sysdep.h>
+
+ENTRY(_savegpr0_all)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_13)
+C_TEXT(_savegpr0_13): stw r13,-76(r1)
+ cfi_offset(r13,-76)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_14)
+C_TEXT(_savegpr0_14): stw r14,-72(r1)
+ cfi_offset(r14,-72)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_15)
+C_TEXT(_savegpr0_15): stw r15,-68(r1)
+ cfi_offset(r15,-68)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_16)
+C_TEXT(_savegpr0_16): stw r16,-64(r1)
+ cfi_offset(r16,-64)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_17)
+C_TEXT(_savegpr0_17): stw r17,-60(r1)
+ cfi_offset(r17,-60)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_18)
+C_TEXT(_savegpr0_18): stw r18,-56(r1)
+ cfi_offset(r18,-56)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_19)
+C_TEXT(_savegpr0_19): stw r19,-52(r1)
+ cfi_offset(r19,-52)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_20)
+C_TEXT(_savegpr0_20): stw r20,-48(r1)
+ cfi_offset(r20,-48)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_21)
+C_TEXT(_savegpr0_21): stw r21,-44(r1)
+ cfi_offset(r21,-44)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_22)
+C_TEXT(_savegpr0_22): stw r22,-40(r1)
+ cfi_offset(r22,-40)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_23)
+C_TEXT(_savegpr0_23): stw r23,-36(r1)
+ cfi_offset(r23,-36)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_24)
+C_TEXT(_savegpr0_24): stw r24,-32(r1)
+ cfi_offset(r24,-32)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_25)
+C_TEXT(_savegpr0_25): stw r25,-28(r1)
+ cfi_offset(r25,-28)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_26)
+C_TEXT(_savegpr0_26): stw r26,-24(r1)
+ cfi_offset(r26,-24)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_27)
+C_TEXT(_savegpr0_27): stw r27,-20(r1)
+ cfi_offset(r27,-20)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_28)
+C_TEXT(_savegpr0_28): stw r28,-16(r1)
+ cfi_offset(r28,-16)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr0_29)
+C_TEXT(_savegpr0_29): stw r29,-12(r1) #save r29
+ stw r30,-8(r1) #save r30
+ stw r31,-4(r1) #save r31
+ cfi_offset(r29,-12)
+ cfi_offset(r30,-8)
+ cfi_offset(r31,-4)
+ stw r0,8(r1) #save LR in callers frame
+ blr #return
+END (_savegpr0_all)
diff --git a/libc/sysdeps/powerpc/powerpc32/gprsave1.S b/libc/sysdeps/powerpc/powerpc32/gprsave1.S
new file mode 100644
index 000000000..6c1790129
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/gprsave1.S
@@ -0,0 +1,64 @@
+/* Copyright (C) 2000, 2001, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+/*
+ General Purpose Register (GPR) save routine
+ when Floating Point Registers (FPRs) are saved
+*/
+
+#include <sysdep.h>
+
+ENTRY(_savegpr1_all)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_13)
+C_TEXT(_savegpr1_13): stw r13,-76(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_14)
+C_TEXT(_savegpr1_14): stw r14,-72(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_15)
+C_TEXT(_savegpr1_15): stw r15,-68(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_16)
+C_TEXT(_savegpr1_16): stw r16,-64(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_17)
+C_TEXT(_savegpr1_17): stw r17,-60(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_18)
+C_TEXT(_savegpr1_18): stw r18,-56(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_19)
+C_TEXT(_savegpr1_19): stw r19,-52(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_20)
+C_TEXT(_savegpr1_20): stw r20,-48(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_21)
+C_TEXT(_savegpr1_21): stw r21,-44(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_22)
+C_TEXT(_savegpr1_22): stw r22,-40(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_23)
+C_TEXT(_savegpr1_23): stw r23,-36(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_24)
+C_TEXT(_savegpr1_24): stw r24,-32(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_25)
+C_TEXT(_savegpr1_25): stw r25,-28(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_26)
+C_TEXT(_savegpr1_26): stw r26,-24(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_27)
+C_TEXT(_savegpr1_27): stw r27,-20(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_28)
+C_TEXT(_savegpr1_28): stw r28,-16(r12)
+ ASM_GLOBAL_DIRECTIVE C_TEXT(_savegpr1_29)
+C_TEXT(_savegpr1_29): stw r29,-12(r12) #save r29
+ stw r30,-8(r12) #save r30
+ stw r31,-4(r12) #save r31
+ blr #return
+END (_savegpr1_all)
diff --git a/libc/sysdeps/powerpc/powerpc32/hp-timing.h b/libc/sysdeps/powerpc/powerpc32/hp-timing.h
new file mode 100644
index 000000000..b62b0f213
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/hp-timing.h
@@ -0,0 +1,82 @@
+/* High precision, low overhead timing functions. Linux/PPC32 version.
+ Copyright (C) 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _HP_TIMING_H
+#define _HP_TIMING_H 1
+
+
+/* There are no generic definitions for the times. We could write something
+ using the `gettimeofday' system call where available but the overhead of
+ the system call might be too high.
+
+ In case a platform supports timers in the hardware the following macros
+ and types must be defined:
+
+ - HP_TIMING_AVAIL: test for availability.
+
+ - HP_TIMING_INLINE: this macro is non-zero if the functionality is not
+ implemented using function calls but instead uses some inlined code
+ which might simply consist of a few assembler instructions. We have to
+ know this since we might want to use the macros here in places where we
+ cannot make function calls.
+
+ - hp_timing_t: This is the type for variables used to store the time
+ values.
+
+ - HP_TIMING_ZERO: clear `hp_timing_t' object.
+
+ - HP_TIMING_NOW: place timestamp for current time in variable given as
+ parameter.
+
+ - HP_TIMING_DIFF_INIT: do whatever is necessary to be able to use the
+ HP_TIMING_DIFF macro.
+
+ - HP_TIMING_DIFF: compute difference between two times and store it
+ in a third. Source and destination might overlap.
+
+ - HP_TIMING_ACCUM: add time difference to another variable. This might
+ be a bit more complicated to implement for some platforms as the
+ operation should be thread-safe and 64bit arithmetic on 32bit platforms
+ is not.
+
+ - HP_TIMING_ACCUM_NT: this is the variant for situations where we know
+ there are no threads involved.
+
+ - HP_TIMING_PRINT: write decimal representation of the timing value into
+ the given string. This operation need not be inline even though
+ HP_TIMING_INLINE is specified.
+
+*/
+
+/* Provide dummy definitions. */
+#define HP_TIMING_AVAIL (0)
+#define HP_TIMING_INLINE (0)
+typedef unsigned long long int hp_timing_t;
+#define HP_TIMING_ZERO(Var)
+#define HP_TIMING_NOW(var)
+#define HP_TIMING_DIFF_INIT()
+#define HP_TIMING_DIFF(Diff, Start, End)
+#define HP_TIMING_ACCUM(Sum, Diff)
+#define HP_TIMING_ACCUM_NT(Sum, Diff)
+#define HP_TIMING_PRINT(Buf, Len, Val)
+
+/* Since this implementation is not available we tell the user about it. */
+#define HP_TIMING_NONAVAIL 1
+
+#endif /* hp-timing.h */
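A hypothetical usage sketch of the interface documented above, assuming a platform whose hp-timing.h provides real definitions (this PPC32 header only supplies dummies); do_work() is a placeholder for whatever is being timed.

    #include <hp-timing.h>

    extern void do_work (void);            /* placeholder */

    void time_it (void)
    {
    #if HP_TIMING_AVAIL
      hp_timing_t start, end, diff;
      char buf[32];
      HP_TIMING_DIFF_INIT ();              /* prepare HP_TIMING_DIFF      */
      HP_TIMING_NOW (start);
      do_work ();
      HP_TIMING_NOW (end);
      HP_TIMING_DIFF (diff, start, end);   /* diff = end - start          */
      HP_TIMING_PRINT (buf, sizeof buf, diff);
    #endif
    }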
diff --git a/libc/sysdeps/powerpc/powerpc32/libgcc-compat.S b/libc/sysdeps/powerpc/powerpc32/libgcc-compat.S
new file mode 100644
index 000000000..196293fd9
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/libgcc-compat.S
@@ -0,0 +1,144 @@
+/* pre-.hidden libgcc compatibility
+ Copyright (C) 2002 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+ .file "libgcc-compat.S"
+
+#include <shlib-compat.h>
+
+#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_2_6)
+
+#define __ashldi3_v_glibc20 INTUSE (__ashldi3)
+#define __ashrdi3_v_glibc20 INTUSE (__ashrdi3)
+#define __lshrdi3_v_glibc20 INTUSE (__lshrdi3)
+#define __cmpdi2_v_glibc20 INTUSE (__cmpdi2)
+#define __ucmpdi2_v_glibc20 INTUSE (__ucmpdi2)
+#define __fixdfdi_v_glibc20 INTUSE (__fixdfdi)
+#define __fixsfdi_v_glibc20 INTUSE (__fixsfdi)
+#define __fixunsdfdi_v_glibc20 INTUSE (__fixunsdfdi)
+#define __fixunssfdi_v_glibc20 INTUSE (__fixunssfdi)
+#define __floatdidf_v_glibc20 INTUSE (__floatdidf)
+#define __floatdisf_v_glibc20 INTUSE (__floatdisf)
+
+ .symver __ashldi3_v_glibc20,__ashldi3@GLIBC_2.0
+ .symver __ashrdi3_v_glibc20,__ashrdi3@GLIBC_2.0
+ .symver __lshrdi3_v_glibc20,__lshrdi3@GLIBC_2.0
+ .symver __cmpdi2_v_glibc20,__cmpdi2@GLIBC_2.0
+ .symver __ucmpdi2_v_glibc20,__ucmpdi2@GLIBC_2.0
+ .symver __fixdfdi_v_glibc20,__fixdfdi@GLIBC_2.0
+ .symver __fixunsdfdi_v_glibc20,__fixunsdfdi@GLIBC_2.0
+ .symver __fixsfdi_v_glibc20,__fixsfdi@GLIBC_2.0
+ .symver __fixunssfdi_v_glibc20,__fixunssfdi@GLIBC_2.0
+ .symver __floatdidf_v_glibc20,__floatdidf@GLIBC_2.0
+ .symver __floatdisf_v_glibc20,__floatdisf@GLIBC_2.0
+
+#ifdef HAVE_DOT_HIDDEN
+ .hidden __ashldi3
+ .hidden __ashrdi3
+ .hidden __lshrdi3
+ .hidden __cmpdi2
+ .hidden __ucmpdi2
+ .hidden __fixdfdi
+ .hidden __fixsfdi
+ .hidden __fixunsdfdi
+ .hidden __fixunssfdi
+ .hidden __floatdidf
+ .hidden __floatdisf
+#endif
+
+ .section ".text"
+
+ .align 2
+ .globl __ashldi3_v_glibc20
+ .type __ashldi3_v_glibc20,@function
+__ashldi3_v_glibc20:
+ b __ashldi3@local
+.Lfe5:
+ .size __ashldi3_v_glibc20,.Lfe5-__ashldi3_v_glibc20
+ .align 2
+ .globl __ashrdi3_v_glibc20
+ .type __ashrdi3_v_glibc20,@function
+__ashrdi3_v_glibc20:
+ b __ashrdi3@local
+.Lfe6:
+ .size __ashrdi3_v_glibc20,.Lfe6-__ashrdi3_v_glibc20
+ .align 2
+ .globl __lshrdi3_v_glibc20
+ .type __lshrdi3_v_glibc20,@function
+__lshrdi3_v_glibc20:
+ b __lshrdi3@local
+.Lfe7:
+ .size __lshrdi3_v_glibc20,.Lfe7-__lshrdi3_v_glibc20
+ .align 2
+ .globl __cmpdi2_v_glibc20
+ .type __cmpdi2_v_glibc20,@function
+__cmpdi2_v_glibc20:
+ b __cmpdi2@local
+.Lfe8:
+ .size __cmpdi2_v_glibc20,.Lfe8-__cmpdi2_v_glibc20
+ .align 2
+ .globl __ucmpdi2_v_glibc20
+ .type __ucmpdi2_v_glibc20,@function
+__ucmpdi2_v_glibc20:
+ b __ucmpdi2@local
+.Lfe9:
+ .size __ucmpdi2_v_glibc20,.Lfe9-__ucmpdi2_v_glibc20
+ .align 2
+ .globl __fixdfdi_v_glibc20
+ .type __fixdfdi_v_glibc20,@function
+__fixdfdi_v_glibc20:
+ b __fixdfdi@local
+.Lfe10:
+ .size __fixdfdi_v_glibc20,.Lfe10-__fixdfdi_v_glibc20
+ .align 2
+ .globl __fixunsdfdi_v_glibc20
+ .type __fixunsdfdi_v_glibc20,@function
+__fixunsdfdi_v_glibc20:
+ b __fixunsdfdi@local
+.Lfe11:
+ .size __fixunsdfdi_v_glibc20,.Lfe11-__fixunsdfdi_v_glibc20
+ .align 2
+ .globl __fixsfdi_v_glibc20
+ .type __fixsfdi_v_glibc20,@function
+__fixsfdi_v_glibc20:
+ b __fixsfdi@local
+.Lfe12:
+ .size __fixsfdi_v_glibc20,.Lfe12-__fixsfdi_v_glibc20
+ .align 2
+ .globl __fixunssfdi_v_glibc20
+ .type __fixunssfdi_v_glibc20,@function
+__fixunssfdi_v_glibc20:
+ b __fixunssfdi@local
+.Lfe13:
+ .size __fixunssfdi_v_glibc20,.Lfe13-__fixunssfdi_v_glibc20
+ .align 2
+ .globl __floatdidf_v_glibc20
+ .type __floatdidf_v_glibc20,@function
+__floatdidf_v_glibc20:
+ b __floatdidf@local
+.Lfe14:
+ .size __floatdidf_v_glibc20,.Lfe14-__floatdidf_v_glibc20
+ .align 2
+ .globl __floatdisf_v_glibc20
+ .type __floatdisf_v_glibc20,@function
+__floatdisf_v_glibc20:
+ b __floatdisf@local
+.Lfe15:
+ .size __floatdisf_v_glibc20,.Lfe15-__floatdisf_v_glibc20
+
+#endif
diff --git a/libc/sysdeps/powerpc/powerpc32/lshift.S b/libc/sysdeps/powerpc/powerpc32/lshift.S
new file mode 100644
index 000000000..65054f229
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/lshift.S
@@ -0,0 +1,133 @@
+/* Shift a limb left, low level routine.
+ Copyright (C) 1996, 1997, 1999, 2000, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* mp_limb_t mpn_lshift (mp_ptr wp, mp_srcptr up, mp_size_t usize,
+ unsigned int cnt) */
+
+EALIGN (BP_SYM (__mpn_lshift), 3, 0)
+
+#if __BOUNDED_POINTERS__
+ slwi r10,r5,2 /* convert limbs to bytes */
+ CHECK_BOUNDS_BOTH_WIDE (r3, r8, r9, r10)
+ CHECK_BOUNDS_BOTH_WIDE (r4, r8, r9, r10)
+#endif
+ mtctr r5 # copy size into CTR
+ cmplwi cr0,r5,16 # is size < 16
+ slwi r0,r5,2
+ add r7,r3,r0 # make r7 point at end of res
+ add r4,r4,r0 # make r4 point at end of s1
+ lwzu r11,-4(r4) # load first s1 limb
+ subfic r8,r6,32
+ srw r3,r11,r8 # compute function return value
+ bge cr0,L(big) # branch if size >= 16
+
+ bdz L(end1)
+
+L(0): lwzu r10,-4(r4)
+ slw r9,r11,r6
+ srw r12,r10,r8
+ or r9,r9,r12
+ stwu r9,-4(r7)
+ bdz L(end2)
+ lwzu r11,-4(r4)
+ slw r9,r10,r6
+ srw r12,r11,r8
+ or r9,r9,r12
+ stwu r9,-4(r7)
+ bdnz L(0)
+
+L(end1):slw r0,r11,r6
+ stw r0,-4(r7)
+ blr
+
+
+/* Guaranteed not to succeed. */
+L(boom): tweq r0,r0
+
+/* We imitate a case statement by using (yuk!) fixed-length code chunks
+   of size 4*12 bytes.  We have to do this (or something) to make this PIC. */
+L(big): mflr r9
+ cfi_register(lr,r9)
+ bltl- cr0,L(boom) # Never taken, only used to set LR.
+ slwi r10,r6,4
+ mflr r12
+ add r10,r12,r10
+ slwi r8,r6,5
+ add r10,r8,r10
+ mtctr r10
+ addi r5,r5,-1
+ mtlr r9
+ cfi_same_value (lr)
+ bctr
+
+L(end2):slw r0,r10,r6
+ stw r0,-4(r7)
+ blr
+
+#define DO_LSHIFT(n) \
+ mtctr r5; \
+L(n): lwzu r10,-4(r4); \
+ slwi r9,r11,n; \
+ inslwi r9,r10,n,32-n; \
+ stwu r9,-4(r7); \
+ bdz- L(end2); \
+ lwzu r11,-4(r4); \
+ slwi r9,r10,n; \
+ inslwi r9,r11,n,32-n; \
+ stwu r9,-4(r7); \
+ bdnz L(n); \
+ b L(end1)
+
+ DO_LSHIFT(1)
+ DO_LSHIFT(2)
+ DO_LSHIFT(3)
+ DO_LSHIFT(4)
+ DO_LSHIFT(5)
+ DO_LSHIFT(6)
+ DO_LSHIFT(7)
+ DO_LSHIFT(8)
+ DO_LSHIFT(9)
+ DO_LSHIFT(10)
+ DO_LSHIFT(11)
+ DO_LSHIFT(12)
+ DO_LSHIFT(13)
+ DO_LSHIFT(14)
+ DO_LSHIFT(15)
+ DO_LSHIFT(16)
+ DO_LSHIFT(17)
+ DO_LSHIFT(18)
+ DO_LSHIFT(19)
+ DO_LSHIFT(20)
+ DO_LSHIFT(21)
+ DO_LSHIFT(22)
+ DO_LSHIFT(23)
+ DO_LSHIFT(24)
+ DO_LSHIFT(25)
+ DO_LSHIFT(26)
+ DO_LSHIFT(27)
+ DO_LSHIFT(28)
+ DO_LSHIFT(29)
+ DO_LSHIFT(30)
+ DO_LSHIFT(31)
+
+END (BP_SYM (__mpn_lshift))
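
[Editorial note, not part of the patch: the following is a minimal C sketch of what __mpn_lshift computes, assuming 32-bit limbs and 0 < cnt < 32. The name mpn_lshift_ref and the typedef are illustrative only; like the assembly above, it walks the vector from the high end down.]

    typedef unsigned int mp_limb_t;

    /* Shift the usize-limb vector at up left by cnt bits, store the
       result at wp, and return the bits shifted out of the top limb.  */
    mp_limb_t
    mpn_lshift_ref (mp_limb_t *wp, const mp_limb_t *up,
                    long usize, unsigned int cnt)
    {
      unsigned int tnc = 32 - cnt;
      mp_limb_t high = up[usize - 1];
      mp_limb_t retval = high >> tnc;   /* function return value, as in the asm */
      long i;

      for (i = usize - 1; i > 0; i--)   /* walk from the high end down */
        {
          mp_limb_t low = up[i - 1];
          wp[i] = (high << cnt) | (low >> tnc);
          high = low;
        }
      wp[0] = high << cnt;
      return retval;
    }
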
diff --git a/libc/sysdeps/powerpc/powerpc32/memset.S b/libc/sysdeps/powerpc/powerpc32/memset.S
new file mode 100644
index 000000000..f09c29467
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/memset.S
@@ -0,0 +1,342 @@
+/* Optimized memset implementation for PowerPC.
+ Copyright (C) 1997, 1999, 2000, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* Define a global static that can hold the cache line size. The
+ assumption is that startup code will access the "aux vector" to
+   obtain the value set by the kernel and store it into this
+ variable. */
+
+ .globl __cache_line_size
+ .lcomm __cache_line_size,4,4
+
+/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]));
+ Returns 's'.
+
+ The memset is done in four sizes: byte (8 bits), word (32 bits),
+ 32-byte blocks (256 bits) and __cache_line_size (128, 256, 1024 bits).
+ There is a special case for setting whole cache lines to 0, which
+ takes advantage of the dcbz instruction. */
+
+ .section ".text"
+EALIGN (BP_SYM (memset), 5, 1)
+
+#define rTMP r0
+#define rRTN r3 /* initial value of 1st argument */
+#if __BOUNDED_POINTERS__
+# define rMEMP0 r4 /* original value of 1st arg */
+# define rCHR r5 /* char to set in each byte */
+# define rLEN r6 /* length of region to set */
+# define rMEMP r10 /* address at which we are storing */
+#else
+# define rMEMP0 r3 /* original value of 1st arg */
+# define rCHR r4 /* char to set in each byte */
+# define rLEN r5 /* length of region to set */
+# define rMEMP r6 /* address at which we are storing */
+#endif
+#define rALIGN r7 /* number of bytes we are setting now (when aligning) */
+#define rMEMP2 r8
+
+#define rPOS32 r7 /* constant +32 for clearing with dcbz */
+#define rNEG64 r8 /* constant -64 for clearing with dcbz */
+#define rNEG32 r9 /* constant -32 for clearing with dcbz */
+
+#define rGOT r9 /* Address of the Global Offset Table. */
+#define rCLS r8 /* Cache line size obtained from static. */
+#define rCLM r9 /* Cache line size mask to check for cache alignment. */
+
+#if __BOUNDED_POINTERS__
+ cmplwi cr1, rRTN, 0
+ CHECK_BOUNDS_BOTH_WIDE (rMEMP0, rTMP, rTMP2, rLEN)
+ beq cr1, L(b0)
+ STORE_RETURN_VALUE (rMEMP0)
+ STORE_RETURN_BOUNDS (rTMP, rTMP2)
+L(b0):
+#endif
+
+/* take care of case for size <= 4 */
+ cmplwi cr1, rLEN, 4
+ andi. rALIGN, rMEMP0, 3
+ mr rMEMP, rMEMP0
+ ble- cr1, L(small)
+/* align to word boundary */
+ cmplwi cr5, rLEN, 31
+ rlwimi rCHR, rCHR, 8, 16, 23
+ beq+ L(aligned) /* 8th instruction from .align */
+ mtcrf 0x01, rMEMP0
+ subfic rALIGN, rALIGN, 4
+ add rMEMP, rMEMP, rALIGN
+ sub rLEN, rLEN, rALIGN
+ bf+ 31, L(g0)
+ stb rCHR, 0(rMEMP0)
+ bt 30, L(aligned)
+L(g0): sth rCHR, -2(rMEMP) /* 16th instruction from .align */
+/* take care of case for size < 31 */
+L(aligned):
+ mtcrf 0x01, rLEN
+ rlwimi rCHR, rCHR, 16, 0, 15
+ ble cr5, L(medium)
+/* align to cache line boundary... */
+ andi. rALIGN, rMEMP, 0x1C
+ subfic rALIGN, rALIGN, 0x20
+ beq L(caligned)
+ mtcrf 0x01, rALIGN
+ add rMEMP, rMEMP, rALIGN
+ sub rLEN, rLEN, rALIGN
+ cmplwi cr1, rALIGN, 0x10
+ mr rMEMP2, rMEMP
+ bf 28, L(a1)
+ stw rCHR, -4(rMEMP2)
+ stwu rCHR, -8(rMEMP2)
+L(a1): blt cr1, L(a2)
+ stw rCHR, -4(rMEMP2) /* 32nd instruction from .align */
+ stw rCHR, -8(rMEMP2)
+ stw rCHR, -12(rMEMP2)
+ stwu rCHR, -16(rMEMP2)
+L(a2): bf 29, L(caligned)
+ stw rCHR, -4(rMEMP2)
+/* now aligned to a cache line. */
+L(caligned):
+ cmplwi cr1, rCHR, 0
+ clrrwi. rALIGN, rLEN, 5
+ mtcrf 0x01, rLEN /* 40th instruction from .align */
+
+/* Check if we can use the special case for clearing memory using dcbz.
+ This requires that we know the correct cache line size for this
+ processor. Getting the __cache_line_size may require establishing GOT
+ addressability, so branch out of line to set this up. */
+ beq cr1, L(checklinesize)
+
+/* Store blocks of 32-bytes (256-bits) starting on a 32-byte boundary.
+ Can't assume that rCHR is zero or that the cache line size is either
+ 32-bytes or even known. */
+L(nondcbz):
+ srwi rTMP, rALIGN, 5
+ mtctr rTMP
+ beq L(medium) /* we may not actually get to do a full line */
+ clrlwi. rLEN, rLEN, 27
+ add rMEMP, rMEMP, rALIGN
+ li rNEG64, -0x40
+ bdz L(cloopdone) /* 48th instruction from .align */
+
+/* We can't use dcbz here as we don't know the cache line size. We can
+ use "data cache block touch for store", which is safe. */
+L(c3): dcbtst rNEG64, rMEMP
+ stw rCHR, -4(rMEMP)
+ stw rCHR, -8(rMEMP)
+ stw rCHR, -12(rMEMP)
+ stw rCHR, -16(rMEMP)
+ nop /* let 601 fetch last 4 instructions of loop */
+ stw rCHR, -20(rMEMP)
+ stw rCHR, -24(rMEMP) /* 56th instruction from .align */
+ nop /* let 601 fetch first 8 instructions of loop */
+ stw rCHR, -28(rMEMP)
+ stwu rCHR, -32(rMEMP)
+ bdnz L(c3)
+L(cloopdone):
+ stw rCHR, -4(rMEMP)
+ stw rCHR, -8(rMEMP)
+ stw rCHR, -12(rMEMP)
+ stw rCHR, -16(rMEMP) /* 64th instruction from .align */
+ stw rCHR, -20(rMEMP)
+ cmplwi cr1, rLEN, 16
+ stw rCHR, -24(rMEMP)
+ stw rCHR, -28(rMEMP)
+ stwu rCHR, -32(rMEMP)
+ beqlr
+ add rMEMP, rMEMP, rALIGN
+ b L(medium_tail2) /* 72nd instruction from .align */
+
+ .align 5
+ nop
+/* Clear cache lines of memory in 128-byte chunks.
+ This code is optimized for processors with 32-byte cache lines.
+ It is further optimized for the 601 processor, which requires
+ some care in how the code is aligned in the i-cache. */
+L(zloopstart):
+ clrlwi rLEN, rLEN, 27
+ mtcrf 0x02, rALIGN
+ srwi. rTMP, rALIGN, 7
+ mtctr rTMP
+ li rPOS32, 0x20
+ li rNEG64, -0x40
+ cmplwi cr1, rLEN, 16 /* 8 */
+ bf 26, L(z0)
+ dcbz 0, rMEMP
+ addi rMEMP, rMEMP, 0x20
+L(z0): li rNEG32, -0x20
+ bf 25, L(z1)
+ dcbz 0, rMEMP
+ dcbz rPOS32, rMEMP
+ addi rMEMP, rMEMP, 0x40 /* 16 */
+L(z1): cmplwi cr5, rLEN, 0
+ beq L(medium)
+L(zloop):
+ dcbz 0, rMEMP
+ dcbz rPOS32, rMEMP
+ addi rMEMP, rMEMP, 0x80
+ dcbz rNEG64, rMEMP
+ dcbz rNEG32, rMEMP
+ bdnz L(zloop)
+ beqlr cr5
+ b L(medium_tail2)
+
+ .align 5
+L(small):
+/* Memset of 4 bytes or less. */
+ cmplwi cr5, rLEN, 1
+ cmplwi cr1, rLEN, 3
+ bltlr cr5
+ stb rCHR, 0(rMEMP)
+ beqlr cr5
+ nop
+ stb rCHR, 1(rMEMP)
+ bltlr cr1
+ stb rCHR, 2(rMEMP)
+ beqlr cr1
+ nop
+ stb rCHR, 3(rMEMP)
+ blr
+
+/* Memset of 0-31 bytes. */
+ .align 5
+L(medium):
+ cmplwi cr1, rLEN, 16
+L(medium_tail2):
+ add rMEMP, rMEMP, rLEN
+L(medium_tail):
+ bt- 31, L(medium_31t)
+ bt- 30, L(medium_30t)
+L(medium_30f):
+ bt- 29, L(medium_29t)
+L(medium_29f):
+ bge- cr1, L(medium_27t)
+ bflr- 28
+ stw rCHR, -4(rMEMP) /* 8th instruction from .align */
+ stw rCHR, -8(rMEMP)
+ blr
+
+L(medium_31t):
+ stbu rCHR, -1(rMEMP)
+ bf- 30, L(medium_30f)
+L(medium_30t):
+ sthu rCHR, -2(rMEMP)
+ bf- 29, L(medium_29f)
+L(medium_29t):
+ stwu rCHR, -4(rMEMP)
+ blt- cr1, L(medium_27f) /* 16th instruction from .align */
+L(medium_27t):
+ stw rCHR, -4(rMEMP)
+ stw rCHR, -8(rMEMP)
+ stw rCHR, -12(rMEMP)
+ stwu rCHR, -16(rMEMP)
+L(medium_27f):
+ bflr- 28
+L(medium_28t):
+ stw rCHR, -4(rMEMP)
+ stw rCHR, -8(rMEMP)
+ blr
+
+L(checklinesize):
+#ifdef SHARED
+ mflr rTMP
+/* If the remaining length is less than 32 bytes then don't bother getting
+ the cache line size. */
+ beq L(medium)
+/* Establishes GOT addressability so we can load __cache_line_size
+ from static. This value was set from the aux vector during startup. */
+# ifdef HAVE_ASM_PPC_REL16
+ bcl 20,31,1f
+1: mflr rGOT
+ addis rGOT,rGOT,__cache_line_size-1b@ha
+ lwz rCLS,__cache_line_size-1b@l(rGOT)
+# else
+ bl _GLOBAL_OFFSET_TABLE_@local-4
+ mflr rGOT
+ lwz rGOT,__cache_line_size@got(rGOT)
+ lwz rCLS,0(rGOT)
+# endif
+ mtlr rTMP
+#else
+/* Load __cache_line_size from static. This value was set from the
+ aux vector during startup. */
+ lis rCLS,__cache_line_size@ha
+/* If the remaining length is less than 32 bytes then don't bother getting
+ the cache line size. */
+ beq L(medium)
+ lwz rCLS,__cache_line_size@l(rCLS)
+#endif
+
+/* If the cache line size was not set then go to L(nondcbz), which is
+ safe for any cache line size. */
+ cmplwi cr1,rCLS,0
+ beq cr1,L(nondcbz)
+
+/* If the cache line size is 32 bytes then go to L(zloopstart),
+   which is coded specifically for 32-byte lines (and the 601). */
+ cmplwi cr1,rCLS,32
+ beq cr1,L(zloopstart)
+
+/* Now we know the cache line size and it is not 32 bytes.  However,
+   we may not yet be aligned to the cache line and may have a partial
+   line to fill.  Touch it first to fetch the cache line. */
+ dcbtst 0,rMEMP
+
+ addi rCLM,rCLS,-1
+L(getCacheAligned):
+ cmplwi cr1,rLEN,32
+ and. rTMP,rCLM,rMEMP
+ blt cr1,L(handletail32)
+ beq L(cacheAligned)
+/* We are not aligned to the start of a cache line yet.  Store 32 bytes
+   of data and test again. */
+ addi rMEMP,rMEMP,32
+ addi rLEN,rLEN,-32
+ stw rCHR,-32(rMEMP)
+ stw rCHR,-28(rMEMP)
+ stw rCHR,-24(rMEMP)
+ stw rCHR,-20(rMEMP)
+ stw rCHR,-16(rMEMP)
+ stw rCHR,-12(rMEMP)
+ stw rCHR,-8(rMEMP)
+ stw rCHR,-4(rMEMP)
+ b L(getCacheAligned)
+
+/* Now we are aligned to the cache line and can use dcbz. */
+L(cacheAligned):
+ cmplw cr1,rLEN,rCLS
+ blt cr1,L(handletail32)
+ dcbz 0,rMEMP
+ subf rLEN,rCLS,rLEN
+ add rMEMP,rMEMP,rCLS
+ b L(cacheAligned)
+
+/* We are here because the cache line size was set, it is not
+   32 bytes, and the remainder (rLEN) is now less than the actual cache
+ line size. Set up the preconditions for L(nondcbz) and go there to
+ store the remaining bytes. */
+L(handletail32):
+ clrrwi. rALIGN, rLEN, 5
+ b L(nondcbz)
+
+END (BP_SYM (memset))
+libc_hidden_builtin_def (memset)
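
[Editorial note, not part of the patch: a simplified C model of the first two size tiers described in the comment at the top of memset.S -- replicate the byte into a word as the two rlwimi instructions do, align with byte stores, then store words. The 32-byte block and dcbz cache-line tiers are omitted, and the name memset_model is made up.]

    #include <stddef.h>
    #include <stdint.h>

    void *
    memset_model (void *s, int c, size_t n)
    {
      unsigned char *p = s;
      unsigned int w = (unsigned char) c;
      w |= w << 8;                            /* replicate byte into halfword */
      w |= w << 16;                           /* ... and into the full word   */

      while (n > 0 && ((uintptr_t) p & 3) != 0)   /* align to a word boundary */
        {
          *p++ = (unsigned char) c;
          n--;
        }
      while (n >= 4)                          /* full word stores */
        {
          *(unsigned int *) p = w;
          p += 4;
          n -= 4;
        }
      while (n-- > 0)                         /* tail bytes */
        *p++ = (unsigned char) c;
      return s;
    }
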
diff --git a/libc/sysdeps/powerpc/powerpc32/mul_1.S b/libc/sysdeps/powerpc/powerpc32/mul_1.S
new file mode 100644
index 000000000..f0e008633
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/mul_1.S
@@ -0,0 +1,53 @@
+/* Multiply a limb vector by a limb, for PowerPC.
+ Copyright (C) 1993-1995, 1997, 1999, 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* mp_limb_t mpn_mul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
+ mp_size_t s1_size, mp_limb_t s2_limb)
+ Calculate s1*s2 and put result in res_ptr; return carry. */
+
+ENTRY (BP_SYM (__mpn_mul_1))
+#if __BOUNDED_POINTERS__
+ slwi r10,r5,2 /* convert limbs to bytes */
+ CHECK_BOUNDS_BOTH_WIDE (r3, r8, r9, r10)
+ CHECK_BOUNDS_BOTH_WIDE (r4, r8, r9, r10)
+#endif
+ mtctr r5
+
+ lwz r0,0(r4)
+ mullw r7,r0,r6
+ mulhwu r10,r0,r6
+ addi r3,r3,-4 # adjust res_ptr
+ addic r5,r5,0 # clear cy with dummy insn
+ bdz L(1)
+
+L(0): lwzu r0,4(r4)
+ stwu r7,4(r3)
+ mullw r8,r0,r6
+ adde r7,r8,r10
+ mulhwu r10,r0,r6
+ bdnz L(0)
+
+L(1): stw r7,4(r3)
+ addze r3,r10
+ blr
+END (BP_SYM (__mpn_mul_1))
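
[Editorial note, not part of the patch: a hedged C sketch of the operation the mullw/mulhwu/adde loop above performs, assuming 32-bit limbs and a 64-bit intermediate product. The name mpn_mul_1_ref is illustrative only.]

    typedef unsigned int mp_limb_t;

    /* Multiply each limb of s1 by s2_limb, store the low words in
       res_ptr, and return the final carry.  */
    mp_limb_t
    mpn_mul_1_ref (mp_limb_t *res_ptr, const mp_limb_t *s1_ptr,
                   long s1_size, mp_limb_t s2_limb)
    {
      mp_limb_t carry = 0;
      long i;

      for (i = 0; i < s1_size; i++)
        {
          unsigned long long prod
            = (unsigned long long) s1_ptr[i] * s2_limb + carry;
          res_ptr[i] = (mp_limb_t) prod;       /* low 32 bits */
          carry = (mp_limb_t) (prod >> 32);    /* high 32 bits carry forward */
        }
      return carry;
    }
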
diff --git a/libc/sysdeps/powerpc/powerpc32/ppc-mcount.S b/libc/sysdeps/powerpc/powerpc32/ppc-mcount.S
new file mode 100644
index 000000000..7e39acb55
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/ppc-mcount.S
@@ -0,0 +1,81 @@
+/* PowerPC-specific implementation of profiling support.
+ Copyright (C) 1997, 1999, 2005, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+/* This would be bad. */
+#ifdef PROF
+#undef PROF
+#endif
+
+#include <sysdep.h>
+
+/* We do profiling as described in the SYSV ELF ABI, except that glibc
+ _mcount manages its own counters. The caller has put the address the
+ caller will return to in the usual place on the stack, 4(r1). _mcount
+ is responsible for ensuring that when it returns no argument-passing
+ registers are disturbed, and that the LR is set back to (what the
+ caller sees as) 4(r1).
+
+ This is intended so that the following code can be inserted at the
+ front of any routine without changing the routine:
+
+ .data
+ mflr r0
+ stw r0,4(r1)
+ bl _mcount
+*/
+
+ENTRY(_mcount)
+ stwu r1,-48(r1)
+ cfi_adjust_cfa_offset (48)
+/* We need to save the parameter-passing registers. */
+ stw r3, 12(r1)
+ stw r4, 16(r1)
+ stw r5, 20(r1)
+ stw r6, 24(r1)
+ mflr r4
+ lwz r3, 52(r1)
+ mfcr r5
+ stw r7, 28(r1)
+ stw r8, 32(r1)
+ stw r9, 36(r1)
+ stw r10,40(r1)
+ stw r4, 44(r1)
+ cfi_offset (lr, -4)
+ stw r5, 8(r1)
+ bl __mcount_internal@local
+ nop
+ /* Restore the registers... */
+ lwz r6, 8(r1)
+ lwz r0, 44(r1)
+ lwz r3, 12(r1)
+ mtctr r0
+ lwz r4, 16(r1)
+ mtcrf 0xff,r6
+ lwz r5, 20(r1)
+ lwz r6, 24(r1)
+ lwz r0, 52(r1)
+ lwz r7, 28(r1)
+ lwz r8, 32(r1)
+ mtlr r0
+ lwz r9, 36(r1)
+ lwz r10,40(r1)
+ /* ...unwind the stack frame, and return to your usual programming. */
+ addi r1,r1,48
+ bctr
+END(_mcount)
diff --git a/libc/sysdeps/powerpc/powerpc32/register-dump.h b/libc/sysdeps/powerpc/powerpc32/register-dump.h
new file mode 100644
index 000000000..d341eea8f
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/register-dump.h
@@ -0,0 +1,121 @@
+/* Dump registers.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sys/uio.h>
+#include <stdio-common/_itoa.h>
+
+/* This prints out the information in the following form: */
+static const char dumpform[] = "\
+Register dump:\n\
+fp0-3: 0000030%0000031% 0000032%0000033% 0000034%0000035% 0000036%0000037%\n\
+fp4-7: 0000038%0000039% 000003a%000003b% 000003c%000003d% 000003e%000003f%\n\
+fp8-11: 0000040%0000041% 0000042%0000043% 0000044%0000045% 0000046%0000047%\n\
+fp12-15: 0000048%0000049% 000004a%000004b% 000004c%000004d% 000004e%000004f%\n\
+fp16-19: 0000050%0000051% 0000052%0000053% 0000054%0000055% 0000056%0000057%\n\
+fp20-23: 0000058%0000059% 000005a%000005b% 000005c%000005d% 000005e%000005f%\n\
+fp24-27: 0000060%0000061% 0000062%0000063% 0000064%0000065% 0000066%0000067%\n\
+fp28-31: 0000068%0000069% 000006a%000006b% 000006c%000006d% 000006e%000006f%\n\
+r0 =0000000% sp =0000001% r2 =0000002% r3 =0000003% trap=0000028%\n\
+r4 =0000004% r5 =0000005% r6 =0000006% r7 =0000007% sr0=0000020% sr1=0000021%\n\
+r8 =0000008% r9 =0000009% r10=000000a% r11=000000b% dar=0000029% dsi=000002a%\n\
+r12=000000c% r13=000000d% r14=000000e% r15=000000f% r3*=0000022%\n\
+r16=0000010% r17=0000011% r18=0000012% r19=0000013%\n\
+r20=0000014% r21=0000015% r22=0000016% r23=0000017% lr=0000024% xer=0000025%\n\
+r24=0000018% r25=0000019% r26=000001a% r27=000001b% mq=0000027% ctr=0000023%\n\
+r28=000001c% r29=000001d% r30=000001e% r31=000001f% fscr=0000071% ccr=0000026%\n\
+";
+
+/* Most of the fields are self-explanatory. 'sr0' is the next
+ instruction to execute, from SRR0, which may have some relationship
+ with the instruction that caused the exception. 'r3*' is the value
+ that will be returned in register 3 when the current system call
+ returns. 'sr1' is SRR1, bits 16-31 of which are copied from the MSR:
+
+ 16 - External interrupt enable
+ 17 - Privilege level (1=user, 0=supervisor)
+ 18 - FP available
+ 19 - Machine check enable (if clear, processor locks up on machine check)
+ 20 - FP exception mode bit 0 (FP exceptions recoverable)
+ 21 - Single-step trace enable
+ 22 - Branch trace enable
+ 23 - FP exception mode bit 1
+ 25 - exception prefix (if set, exceptions are taken from 0xFFFnnnnn,
+ otherwise from 0x000nnnnn).
+ 26 - Instruction address translation enabled.
+ 27 - Data address translation enabled.
+ 30 - Exception is recoverable (otherwise, don't try to return).
+ 31 - Little-endian mode enable.
+
+ 'Trap' is the address of the exception:
+
+ 00200 - Machine check exception (memory parity error, for instance)
+ 00300 - Data access exception (memory not mapped, see dsisr for why)
+ 00400 - Instruction access exception (memory not mapped)
+ 00500 - External interrupt
+ 00600 - Alignment exception (see dsisr for more information)
+ 00700 - Program exception (illegal/trap instruction, FP exception)
+ 00800 - FP unavailable (should not be seen by user code)
+ 00900 - Decrementer exception (for instance, SIGALRM)
+ 00A00 - I/O controller interface exception
+ 00C00 - System call exception (for instance, kill(3)).
+ 00E00 - FP assist exception (optional FP instructions, etc.)
+
+ 'dar' is the memory location, for traps 00300, 00400, 00600, 00A00.
+ 'dsisr' has the following bits under trap 00300:
+ 0 - direct-store error exception
+ 1 - no page table entry for page
+ 4 - memory access not permitted
+ 5 - trying to access I/O controller space or using lwarx/stwcx on
+ non-write-cached memory
+ 6 - access was store
+ 9 - data access breakpoint hit
+ 10 - segment table search failed to find translation (64-bit ppcs only)
+ 11 - I/O controller instruction not permitted
+ For trap 00400, the same bits are set in SRR1 instead.
+   For trap 00600, bits 12-31 of the DSISR are set to allow emulation of
+ the instruction without actually having to read it from memory.
+*/
+
+#define xtoi(x) (x >= 'a' ? x + 10 - 'a' : x - '0')
+
+static void
+register_dump (int fd, struct sigcontext *ctx)
+{
+ char buffer[sizeof(dumpform)];
+ char *bufferpos;
+ unsigned regno;
+ unsigned *regs = (unsigned *)(ctx->regs);
+
+ memcpy(buffer, dumpform, sizeof(dumpform));
+
+ /* Generate the output. */
+ while ((bufferpos = memchr (buffer, '%', sizeof(dumpform))))
+ {
+ regno = xtoi (bufferpos[-1]) | xtoi (bufferpos[-2]) << 4;
+ memset (bufferpos-2, '0', 3);
+ _itoa_word (regs[regno], bufferpos+1, 16, 0);
+ }
+
+ /* Write the output. */
+ write (fd, buffer, sizeof(buffer));
+}
+
+
+#define REGISTER_DUMP \
+ register_dump (fd, ctx)
diff --git a/libc/sysdeps/powerpc/powerpc32/rshift.S b/libc/sysdeps/powerpc/powerpc32/rshift.S
new file mode 100644
index 000000000..498b6c4a8
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/rshift.S
@@ -0,0 +1,63 @@
+/* Shift a limb right, low level routine.
+ Copyright (C) 1995, 1997, 1999, 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* INPUT PARAMETERS
+ res_ptr r3
+ s1_ptr r4
+ size r5
+ cnt r6 */
+
+ENTRY (BP_SYM (__mpn_rshift))
+#if __BOUNDED_POINTERS__
+ slwi r10,r5,2 /* convert limbs to bytes */
+ CHECK_BOUNDS_BOTH_WIDE (r3, r8, r9, r10)
+ CHECK_BOUNDS_BOTH_WIDE (r4, r8, r9, r10)
+#endif
+ mtctr r5 # copy size into CTR
+ addi r7,r3,-4 # move adjusted res_ptr to free return reg
+ subfic r8,r6,32
+ lwz r11,0(r4) # load first s1 limb
+ slw r3,r11,r8 # compute function return value
+ bdz L(1)
+
+L(0): lwzu r10,4(r4)
+ srw r9,r11,r6
+ slw r12,r10,r8
+ or r9,r9,r12
+ stwu r9,4(r7)
+ bdz L(2)
+ lwzu r11,4(r4)
+ srw r9,r10,r6
+ slw r12,r11,r8
+ or r9,r9,r12
+ stwu r9,4(r7)
+ bdnz L(0)
+
+L(1): srw r0,r11,r6
+ stw r0,4(r7)
+ blr
+
+L(2): srw r0,r10,r6
+ stw r0,4(r7)
+ blr
+END (BP_SYM (__mpn_rshift))
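
[Editorial note, not part of the patch: a minimal C sketch of __mpn_rshift above, assuming 32-bit limbs and 0 < cnt < 32. The name mpn_rshift_ref is illustrative only; like the assembly, it works from the low limb upwards and returns the bits shifted out of the bottom limb, left-justified.]

    typedef unsigned int mp_limb_t;

    mp_limb_t
    mpn_rshift_ref (mp_limb_t *wp, const mp_limb_t *up,
                    long usize, unsigned int cnt)
    {
      unsigned int tnc = 32 - cnt;
      mp_limb_t low = up[0];
      mp_limb_t retval = low << tnc;    /* function return value, as in the asm */
      long i;

      for (i = 1; i < usize; i++)       /* walk from the low end up */
        {
          mp_limb_t high = up[i];
          wp[i - 1] = (low >> cnt) | (high << tnc);
          low = high;
        }
      wp[usize - 1] = low >> cnt;
      return retval;
    }
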
diff --git a/libc/sysdeps/powerpc/powerpc32/setjmp-common.S b/libc/sysdeps/powerpc/powerpc32/setjmp-common.S
new file mode 100644
index 000000000..12ee14d78
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/setjmp-common.S
@@ -0,0 +1,74 @@
+/* setjmp for PowerPC.
+ Copyright (C) 1995-1997,1999-2001,2003,2004,2005, 2006
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#define _ASM
+#ifdef __NO_VMX__
+# include <novmxsetjmp.h>
+#else
+# include <jmpbuf-offsets.h>
+#endif
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+
+ENTRY (BP_SYM (__sigsetjmp))
+ CHECK_BOUNDS_BOTH_WIDE_LIT (r3, r8, r9, JB_SIZE)
+
+#ifdef PTR_MANGLE
+ mr r5,r1
+ PTR_MANGLE(r5, r10)
+ stw r5,(JB_GPR1*4)(3)
+#else
+ stw r1,(JB_GPR1*4)(3)
+#endif
+ mflr r0
+ stw r14,((JB_GPRS+0)*4)(3)
+#ifdef PTR_MANGLE
+ PTR_MANGLE2 (r0, r10)
+ li r10,0
+#endif
+ stw r0,(JB_LR*4)(3)
+ stw r15,((JB_GPRS+1)*4)(3)
+ mfcr r0
+ stw r16,((JB_GPRS+2)*4)(3)
+ stw r0,(JB_CR*4)(3)
+ stw r17,((JB_GPRS+3)*4)(3)
+ stw r18,((JB_GPRS+4)*4)(3)
+ stw r19,((JB_GPRS+5)*4)(3)
+ stw r20,((JB_GPRS+6)*4)(3)
+ stw r21,((JB_GPRS+7)*4)(3)
+ stw r22,((JB_GPRS+8)*4)(3)
+ stw r23,((JB_GPRS+9)*4)(3)
+ stw r24,((JB_GPRS+10)*4)(3)
+ stw r25,((JB_GPRS+11)*4)(3)
+ stw r26,((JB_GPRS+12)*4)(3)
+ stw r27,((JB_GPRS+13)*4)(3)
+ stw r28,((JB_GPRS+14)*4)(3)
+ stw r29,((JB_GPRS+15)*4)(3)
+ stw r30,((JB_GPRS+16)*4)(3)
+ stw r31,((JB_GPRS+17)*4)(3)
+#if defined NOT_IN_libc && defined IS_IN_rtld
+ li r3,0
+ blr
+#else
+ b BP_SYM (__sigjmp_save@local)
+#endif
+END (BP_SYM (__sigsetjmp))
diff --git a/libc/sysdeps/powerpc/powerpc32/setjmp.S b/libc/sysdeps/powerpc/powerpc32/setjmp.S
new file mode 100644
index 000000000..ef3514a46
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/setjmp.S
@@ -0,0 +1,44 @@
+/* Non-AltiVec (old) version of setjmp for PowerPC.
+ Copyright (C) 1995-1997,1999-2001,2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <shlib-compat.h>
+#include <libc-symbols.h>
+
+#if defined NOT_IN_libc
+/* Build a non-versioned object for rtld-*. */
+# include "setjmp-common.S"
+
+#else /* !NOT_IN_libc */
+/* Build a versioned object for libc. */
+default_symbol_version (__vmx__sigsetjmp,__sigsetjmp,GLIBC_2.3.4)
+# define __sigsetjmp __vmx__sigsetjmp
+# define __sigjmp_save __vmx__sigjmp_save
+# include "setjmp-common.S"
+
+# if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+# define __NO_VMX__
+# undef __sigsetjmp
+# undef __sigjmp_save
+# undef JB_SIZE
+symbol_version (__novmx__sigsetjmp,__sigsetjmp,GLIBC_2.0)
+# define __sigsetjmp __novmx__sigsetjmp
+# define __sigjmp_save __novmx__sigjmp_save
+# include "setjmp-common.S"
+# endif
+#endif /* !NOT_IN_libc */
diff --git a/libc/sysdeps/powerpc/powerpc32/stpcpy.S b/libc/sysdeps/powerpc/powerpc32/stpcpy.S
new file mode 100644
index 000000000..819fcdb7e
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/stpcpy.S
@@ -0,0 +1,122 @@
+/* Optimized stpcpy implementation for PowerPC.
+ Copyright (C) 1997, 1999, 2000, 2002, 2004 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* See strlen.s for comments on how the end-of-string testing works. */
+
+/* char * [r3] stpcpy (char *dest [r3], const char *src [r4]) */
+
+EALIGN (BP_SYM (__stpcpy), 4, 0)
+
+#define rTMP r0
+#define rRTN r3
+#if __BOUNDED_POINTERS__
+# define rDEST r4 /* pointer to previous word in dest */
+# define rSRC r5 /* pointer to previous word in src */
+# define rLOW r11
+# define rHIGH r12
+#else
+# define rDEST r3 /* pointer to previous word in dest */
+# define rSRC r4 /* pointer to previous word in src */
+#endif
+#define rWORD r6 /* current word from src */
+#define rFEFE r7 /* 0xfefefeff */
+#define r7F7F r8 /* 0x7f7f7f7f */
+#define rNEG r9 /* ~(word in src | 0x7f7f7f7f) */
+#define rALT r10 /* alternate word from src */
+
+ CHECK_BOUNDS_LOW (rSRC, rLOW, rHIGH)
+ CHECK_BOUNDS_LOW (rDEST, rLOW, rHIGH)
+ STORE_RETURN_BOUNDS (rLOW, rHIGH)
+
+ or rTMP, rSRC, rDEST
+ clrlwi. rTMP, rTMP, 30
+ addi rDEST, rDEST, -4
+ bne L(unaligned)
+
+ lis rFEFE, -0x101
+ lis r7F7F, 0x7f7f
+ lwz rWORD, 0(rSRC)
+ addi rFEFE, rFEFE, -0x101
+ addi r7F7F, r7F7F, 0x7f7f
+ b L(g2)
+
+L(g0): lwzu rALT, 4(rSRC)
+ stwu rWORD, 4(rDEST)
+ add rTMP, rFEFE, rALT
+ nor rNEG, r7F7F, rALT
+ and. rTMP, rTMP, rNEG
+ bne- L(g1)
+ lwzu rWORD, 4(rSRC)
+ stwu rALT, 4(rDEST)
+L(g2): add rTMP, rFEFE, rWORD
+ nor rNEG, r7F7F, rWORD
+ and. rTMP, rTMP, rNEG
+ beq+ L(g0)
+
+ mr rALT, rWORD
+/* We've hit the end of the string. Do the rest byte-by-byte. */
+L(g1): rlwinm. rTMP, rALT, 8, 24, 31
+ stbu rTMP, 4(rDEST)
+ beqlr-
+ rlwinm. rTMP, rALT, 16, 24, 31
+ stbu rTMP, 1(rDEST)
+ beqlr-
+ rlwinm. rTMP, rALT, 24, 24, 31
+ stbu rTMP, 1(rDEST)
+ beqlr-
+ stbu rALT, 1(rDEST)
+ CHECK_BOUNDS_HIGH (rDEST, rHIGH, twlgt)
+ STORE_RETURN_VALUE (rDEST)
+ blr
+
+/* Oh well. In this case, we just do a byte-by-byte copy. */
+ .align 4
+ nop
+L(unaligned):
+ lbz rWORD, 0(rSRC)
+ addi rDEST, rDEST, 3
+ cmpwi rWORD, 0
+ beq- L(u2)
+
+L(u0): lbzu rALT, 1(rSRC)
+ stbu rWORD, 1(rDEST)
+ cmpwi rALT, 0
+ beq- L(u1)
+ nop /* Let 601 load start of loop. */
+ lbzu rWORD, 1(rSRC)
+ stbu rALT, 1(rDEST)
+ cmpwi rWORD, 0
+ bne+ L(u0)
+L(u2): stbu rWORD, 1(rDEST)
+ CHECK_BOUNDS_HIGH (rDEST, rHIGH, twlgt)
+ STORE_RETURN_VALUE (rDEST)
+ blr
+L(u1): stbu rALT, 1(rDEST)
+ CHECK_BOUNDS_HIGH (rDEST, rHIGH, twlgt)
+ STORE_RETURN_VALUE (rDEST)
+ blr
+END (BP_SYM (__stpcpy))
+
+weak_alias (BP_SYM (__stpcpy), BP_SYM (stpcpy))
+libc_hidden_def (__stpcpy)
+libc_hidden_builtin_def (stpcpy)
diff --git a/libc/sysdeps/powerpc/powerpc32/strchr.S b/libc/sysdeps/powerpc/powerpc32/strchr.S
new file mode 100644
index 000000000..0c6f4e9f5
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/strchr.S
@@ -0,0 +1,131 @@
+/* Optimized strchr implementation for PowerPC.
+ Copyright (C) 1997, 1999, 2000, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* See strlen.s for comments on how this works. */
+
+/* char * [r3] strchr (const char *s [r3] , int c [r4] ) */
+
+ENTRY (BP_SYM (strchr))
+
+#define rTMP1 r0
+#define rRTN r3 /* outgoing result */
+#if __BOUNDED_POINTERS__
+# define rSTR r4
+# define rCHR r5 /* byte we're looking for, spread over the whole word */
+# define rWORD r8 /* the current word */
+#else
+# define rSTR r8 /* current word pointer */
+# define rCHR r4 /* byte we're looking for, spread over the whole word */
+# define rWORD r5 /* the current word */
+#endif
+#define rCLZB rCHR /* leading zero byte count */
+#define rFEFE r6 /* constant 0xfefefeff (-0x01010101) */
+#define r7F7F r7 /* constant 0x7f7f7f7f */
+#define rTMP2 r9
+#define rIGN r10 /* number of bits we should ignore in the first word */
+#define rMASK r11 /* mask with the bits to ignore set to 0 */
+#define rTMP3 r12
+
+ CHECK_BOUNDS_LOW (rSTR, rTMP1, rTMP2)
+ STORE_RETURN_BOUNDS (rTMP1, rTMP2)
+
+ rlwimi rCHR, rCHR, 8, 16, 23
+ li rMASK, -1
+ rlwimi rCHR, rCHR, 16, 0, 15
+ rlwinm rIGN, rRTN, 3, 27, 28
+ lis rFEFE, -0x101
+ lis r7F7F, 0x7f7f
+ clrrwi rSTR, rRTN, 2
+ addi rFEFE, rFEFE, -0x101
+ addi r7F7F, r7F7F, 0x7f7f
+/* Test the first (partial?) word. */
+ lwz rWORD, 0(rSTR)
+ srw rMASK, rMASK, rIGN
+ orc rWORD, rWORD, rMASK
+ add rTMP1, rFEFE, rWORD
+ nor rTMP2, r7F7F, rWORD
+ and. rTMP1, rTMP1, rTMP2
+ xor rTMP3, rCHR, rWORD
+ orc rTMP3, rTMP3, rMASK
+ b L(loopentry)
+
+/* The loop. */
+
+L(loop):lwzu rWORD, 4(rSTR)
+ and. rTMP1, rTMP1, rTMP2
+/* Test for 0. */
+ add rTMP1, rFEFE, rWORD
+ nor rTMP2, r7F7F, rWORD
+ bne L(foundit)
+ and. rTMP1, rTMP1, rTMP2
+/* Start test for the bytes we're looking for. */
+ xor rTMP3, rCHR, rWORD
+L(loopentry):
+ add rTMP1, rFEFE, rTMP3
+ nor rTMP2, r7F7F, rTMP3
+ beq L(loop)
+/* There is a zero byte in the word, but there may also be a matching byte (either
+ before or after the zero byte). In fact, we may be looking for a
+ zero byte, in which case we return a match. We guess that this hasn't
+ happened, though. */
+L(missed):
+ and. rTMP1, rTMP1, rTMP2
+ li rRTN, 0
+ STORE_RETURN_VALUE (rSTR)
+ beqlr
+/* It did happen. Decide which one was first...
+ I'm not sure if this is actually faster than a sequence of
+ rotates, compares, and branches (we use it anyway because it's shorter). */
+ and rFEFE, r7F7F, rWORD
+ or rMASK, r7F7F, rWORD
+ and rTMP1, r7F7F, rTMP3
+ or rIGN, r7F7F, rTMP3
+ add rFEFE, rFEFE, r7F7F
+ add rTMP1, rTMP1, r7F7F
+ nor rWORD, rMASK, rFEFE
+ nor rTMP2, rIGN, rTMP1
+ cmplw rWORD, rTMP2
+ bgtlr
+ cntlzw rCLZB, rTMP2
+ srwi rCLZB, rCLZB, 3
+ add rRTN, rSTR, rCLZB
+ CHECK_BOUNDS_HIGH_RTN (rSTR, rTMP2, twlge)
+ STORE_RETURN_VALUE (rSTR)
+ blr
+
+L(foundit):
+ and rTMP1, r7F7F, rTMP3
+ or rIGN, r7F7F, rTMP3
+ add rTMP1, rTMP1, r7F7F
+ nor rTMP2, rIGN, rTMP1
+ cntlzw rCLZB, rTMP2
+ subi rSTR, rSTR, 4
+ srwi rCLZB, rCLZB, 3
+ add rRTN, rSTR, rCLZB
+ CHECK_BOUNDS_HIGH_RTN (rSTR, rTMP2, twlge)
+ STORE_RETURN_VALUE (rSTR)
+ blr
+END (BP_SYM (strchr))
+
+weak_alias (BP_SYM (strchr), BP_SYM (index))
+libc_hidden_builtin_def (strchr)
diff --git a/libc/sysdeps/powerpc/powerpc32/strcmp.S b/libc/sysdeps/powerpc/powerpc32/strcmp.S
new file mode 100644
index 000000000..fa75eca3a
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/strcmp.S
@@ -0,0 +1,127 @@
+/* Optimized strcmp implementation for PowerPC.
+ Copyright (C) 1997, 1999, 2000, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* See strlen.s for comments on how the end-of-string testing works. */
+
+/* int [r3] strcmp (const char *s1 [r3], const char *s2 [r4]) */
+
+EALIGN (BP_SYM (strcmp), 4, 0)
+
+#define rTMP r0
+#define rRTN r3
+#define rSTR1 r3 /* first string arg */
+#define rSTR2 r4 /* second string arg */
+#if __BOUNDED_POINTERS__
+# define rHIGH1 r11
+# define rHIGH2 r12
+#endif
+#define rWORD1 r5 /* current word in s1 */
+#define rWORD2 r6 /* current word in s2 */
+#define rFEFE r7 /* constant 0xfefefeff (-0x01010101) */
+#define r7F7F r8 /* constant 0x7f7f7f7f */
+#define rNEG r9 /* ~(word in s1 | 0x7f7f7f7f) */
+#define rBITDIF r10 /* bits that differ in s1 & s2 words */
+
+ CHECK_BOUNDS_LOW (rSTR1, rTMP, rHIGH1)
+ CHECK_BOUNDS_LOW (rSTR2, rTMP, rHIGH2)
+
+ or rTMP, rSTR2, rSTR1
+ clrlwi. rTMP, rTMP, 30
+ lis rFEFE, -0x101
+ bne L(unaligned)
+
+ lwz rWORD1, 0(rSTR1)
+ lwz rWORD2, 0(rSTR2)
+ lis r7F7F, 0x7f7f
+ addi rFEFE, rFEFE, -0x101
+ addi r7F7F, r7F7F, 0x7f7f
+ b L(g1)
+
+L(g0): lwzu rWORD1, 4(rSTR1)
+ bne cr1, L(different)
+ lwzu rWORD2, 4(rSTR2)
+L(g1): add rTMP, rFEFE, rWORD1
+ nor rNEG, r7F7F, rWORD1
+ and. rTMP, rTMP, rNEG
+ cmpw cr1, rWORD1, rWORD2
+ beq+ L(g0)
+L(endstring):
+/* OK. We've hit the end of the string. We need to be careful that
+ we don't compare two strings as different because of gunk beyond
+ the end of the strings... */
+ and rTMP, r7F7F, rWORD1
+ beq cr1, L(equal)
+ add rTMP, rTMP, r7F7F
+ xor. rBITDIF, rWORD1, rWORD2
+ andc rNEG, rNEG, rTMP
+ blt- L(highbit)
+ cntlzw rBITDIF, rBITDIF
+ cntlzw rNEG, rNEG
+ addi rNEG, rNEG, 7
+ cmpw cr1, rNEG, rBITDIF
+ sub rRTN, rWORD1, rWORD2
+ bgelr+ cr1
+L(equal):
+ li rRTN, 0
+ /* GKM FIXME: check high bounds. */
+ blr
+
+L(different):
+ lwz rWORD1, -4(rSTR1)
+ xor. rBITDIF, rWORD1, rWORD2
+ sub rRTN, rWORD1, rWORD2
+ bgelr+
+L(highbit):
+ ori rRTN, rWORD2, 1
+ /* GKM FIXME: check high bounds. */
+ blr
+
+
+/* Oh well. In this case, we just do a byte-by-byte comparison. */
+ .align 4
+L(unaligned):
+ lbz rWORD1, 0(rSTR1)
+ lbz rWORD2, 0(rSTR2)
+ b L(u1)
+
+L(u0): lbzu rWORD1, 1(rSTR1)
+ bne- L(u4)
+ lbzu rWORD2, 1(rSTR2)
+L(u1): cmpwi cr1, rWORD1, 0
+ beq- cr1, L(u3)
+ cmpw rWORD1, rWORD2
+ bne- L(u3)
+ lbzu rWORD1, 1(rSTR1)
+ lbzu rWORD2, 1(rSTR2)
+ cmpwi cr1, rWORD1, 0
+ cmpw rWORD1, rWORD2
+ bne+ cr1, L(u0)
+L(u3): sub rRTN, rWORD1, rWORD2
+ /* GKM FIXME: check high bounds. */
+ blr
+L(u4): lbz rWORD1, -1(rSTR1)
+ sub rRTN, rWORD1, rWORD2
+ /* GKM FIXME: check high bounds. */
+ blr
+END (BP_SYM (strcmp))
+libc_hidden_builtin_def (strcmp)
diff --git a/libc/sysdeps/powerpc/powerpc32/strcpy.S b/libc/sysdeps/powerpc/powerpc32/strcpy.S
new file mode 100644
index 000000000..7fd89d2e4
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/strcpy.S
@@ -0,0 +1,121 @@
+/* Optimized strcpy implementation for PowerPC.
+ Copyright (C) 1997, 1999, 2000, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* See strlen.s for comments on how the end-of-string testing works. */
+
+/* char * [r3] strcpy (char *dest [r3], const char *src [r4]) */
+
+EALIGN (BP_SYM (strcpy), 4, 0)
+
+#define rTMP r0
+#define rRTN r3 /* incoming DEST arg preserved as result */
+#if __BOUNDED_POINTERS__
+# define rDEST r4 /* pointer to previous word in dest */
+# define rSRC r5 /* pointer to previous word in src */
+# define rLOW r11
+# define rHIGH r12
+#else
+# define rSRC r4 /* pointer to previous word in src */
+# define rDEST r5 /* pointer to previous word in dest */
+#endif
+#define rWORD r6 /* current word from src */
+#define rFEFE r7 /* constant 0xfefefeff (-0x01010101) */
+#define r7F7F r8 /* constant 0x7f7f7f7f */
+#define rNEG r9 /* ~(word in s1 | 0x7f7f7f7f) */
+#define rALT r10 /* alternate word from src */
+
+ CHECK_BOUNDS_LOW (rSRC, rLOW, rHIGH)
+ CHECK_BOUNDS_LOW (rDEST, rLOW, rHIGH)
+ STORE_RETURN_BOUNDS (rLOW, rHIGH)
+
+ or rTMP, rSRC, rRTN
+ clrlwi. rTMP, rTMP, 30
+#if __BOUNDED_POINTERS__
+ addi rDEST, rDEST, -4
+#else
+ addi rDEST, rRTN, -4
+#endif
+ bne L(unaligned)
+
+ lis rFEFE, -0x101
+ lis r7F7F, 0x7f7f
+ lwz rWORD, 0(rSRC)
+ addi rFEFE, rFEFE, -0x101
+ addi r7F7F, r7F7F, 0x7f7f
+ b L(g2)
+
+L(g0): lwzu rALT, 4(rSRC)
+ stwu rWORD, 4(rDEST)
+ add rTMP, rFEFE, rALT
+ nor rNEG, r7F7F, rALT
+ and. rTMP, rTMP, rNEG
+ bne- L(g1)
+ lwzu rWORD, 4(rSRC)
+ stwu rALT, 4(rDEST)
+L(g2): add rTMP, rFEFE, rWORD
+ nor rNEG, r7F7F, rWORD
+ and. rTMP, rTMP, rNEG
+ beq+ L(g0)
+
+ mr rALT, rWORD
+/* We've hit the end of the string. Do the rest byte-by-byte. */
+L(g1): rlwinm. rTMP, rALT, 8, 24, 31
+ stb rTMP, 4(rDEST)
+ beqlr-
+ rlwinm. rTMP, rALT, 16, 24, 31
+ stb rTMP, 5(rDEST)
+ beqlr-
+ rlwinm. rTMP, rALT, 24, 24, 31
+ stb rTMP, 6(rDEST)
+ beqlr-
+ stb rALT, 7(rDEST)
+ /* GKM FIXME: check high bound. */
+ blr
+
+/* Oh well. In this case, we just do a byte-by-byte copy. */
+ .align 4
+ nop
+L(unaligned):
+ lbz rWORD, 0(rSRC)
+ addi rDEST, rRTN, -1
+ cmpwi rWORD, 0
+ beq- L(u2)
+
+L(u0): lbzu rALT, 1(rSRC)
+ stbu rWORD, 1(rDEST)
+ cmpwi rALT, 0
+ beq- L(u1)
+ nop /* Let 601 load start of loop. */
+ lbzu rWORD, 1(rSRC)
+ stbu rALT, 1(rDEST)
+ cmpwi rWORD, 0
+ bne+ L(u0)
+L(u2): stb rWORD, 1(rDEST)
+ /* GKM FIXME: check high bound. */
+ blr
+L(u1): stb rALT, 1(rDEST)
+ /* GKM FIXME: check high bound. */
+ blr
+
+END (BP_SYM (strcpy))
+libc_hidden_builtin_def (strcpy)
diff --git a/libc/sysdeps/powerpc/powerpc32/strlen.S b/libc/sysdeps/powerpc/powerpc32/strlen.S
new file mode 100644
index 000000000..ec35d2309
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/strlen.S
@@ -0,0 +1,160 @@
+/* Optimized strlen implementation for PowerPC.
+ Copyright (C) 1997, 1999, 2000, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* The algorithm here uses the following techniques:
+
+ 1) Given a word 'x', we can test to see if it contains any 0 bytes
+ by subtracting 0x01010101, and seeing if any of the high bits of each
+ byte changed from 0 to 1. This works because the least significant
+ 0 byte must have had no incoming carry (otherwise it's not the least
+ significant), so it is 0x00 - 0x01 == 0xff. For all other
+ byte values, either they have the high bit set initially, or when
+ 1 is subtracted you get a value in the range 0x00-0x7f, none of which
+ have their high bit set. The expression here is
+ (x + 0xfefefeff) & ~(x | 0x7f7f7f7f), which gives 0x00000000 when
+ there were no 0x00 bytes in the word.
+
+ 2) Given a word 'x', we can test to see _which_ byte was zero by
+ calculating ~(((x & 0x7f7f7f7f) + 0x7f7f7f7f) | x | 0x7f7f7f7f).
+ This produces 0x80 in each byte that was zero, and 0x00 in all
+ the other bytes. The '| 0x7f7f7f7f' clears the low 7 bits in each
+ byte, and the '| x' part ensures that bytes with the high bit set
+ produce 0x00. The addition will carry into the high bit of each byte
+ iff that byte had one of its low 7 bits set. We can then just see
+ which was the most significant bit set and divide by 8 to find how
+ many to add to the index.
+ This is from the book 'The PowerPC Compiler Writer's Guide',
+ by Steve Hoxey, Faraydon Karim, Bill Hay and Hank Warren.
+
+ We deal with strings not aligned to a word boundary by taking the
+ first word and ensuring that bytes not part of the string
+ are treated as nonzero. To allow for memory latency, we unroll the
+ loop a few times, being careful to ensure that we do not read ahead
+ across cache line boundaries.
+
+ Questions to answer:
+ 1) How long are strings passed to strlen? If they're often really long,
+ we should probably use cache management instructions and/or unroll the
+ loop more. If they're often quite short, it might be better to use
+ fact (2) in the inner loop than have to recalculate it.
+ 2) How popular are bytes with the high bit set? If they are very rare,
+ on some processors it might be useful to use the simpler expression
+ ~((x - 0x01010101) | 0x7f7f7f7f) (that is, on processors with only one
+ ALU), but this fails when any character has its high bit set. */
+
+/* Some notes on register usage: Under the SVR4 ABI, we can use registers
+ 0 and 3 through 12 (so long as we don't call any procedures) without
+ saving them. We can also use registers 14 through 31 if we save them.
+ We can't use r1 (it's the stack pointer), r2 nor r13 because the user
+ program may expect them to hold their usual value if we get sent
+ a signal. Integer parameters are passed in r3 through r10.
+ We can use condition registers cr0, cr1, cr5, cr6, and cr7 without saving
+ them, the others we must save. */
+
+/* int [r3] strlen (char *s [r3]) */
+
+ENTRY (BP_SYM (strlen))
+
+#define rTMP1 r0
+#define rRTN r3 /* incoming STR arg, outgoing result */
+#define rSTR r4 /* current string position */
+#define rPADN r5 /* number of padding bits we prepend to the
+ string to make it start at a word boundary */
+#define rFEFE r6 /* constant 0xfefefeff (-0x01010101) */
+#define r7F7F r7 /* constant 0x7f7f7f7f */
+#define rWORD1 r8 /* current string word */
+#define rWORD2 r9 /* next string word */
+#define rMASK r9 /* mask for first string word */
+#define rTMP2 r10
+#define rTMP3 r11
+#define rTMP4 r12
+
+ CHECK_BOUNDS_LOW (rRTN, rTMP1, rTMP2)
+
+ clrrwi rSTR, rRTN, 2
+ lis r7F7F, 0x7f7f
+ rlwinm rPADN, rRTN, 3, 27, 28
+ lwz rWORD1, 0(rSTR)
+ li rMASK, -1
+ addi r7F7F, r7F7F, 0x7f7f
+/* That's the setup done, now do the first pair of words.
+ We make an exception and use method (2) on the first two words, to reduce
+ overhead. */
+ srw rMASK, rMASK, rPADN
+ and rTMP1, r7F7F, rWORD1
+ or rTMP2, r7F7F, rWORD1
+ add rTMP1, rTMP1, r7F7F
+ nor rTMP1, rTMP2, rTMP1
+ and. rWORD1, rTMP1, rMASK
+ mtcrf 0x01, rRTN
+ bne L(done0)
+ lis rFEFE, -0x101
+ addi rFEFE, rFEFE, -0x101
+/* Are we now aligned to a doubleword boundary? */
+ bt 29, L(loop)
+
+/* Handle second word of pair. */
+ lwzu rWORD1, 4(rSTR)
+ and rTMP1, r7F7F, rWORD1
+ or rTMP2, r7F7F, rWORD1
+ add rTMP1, rTMP1, r7F7F
+ nor. rWORD1, rTMP2, rTMP1
+ bne L(done0)
+
+/* The loop. */
+
+L(loop):
+ lwz rWORD1, 4(rSTR)
+ lwzu rWORD2, 8(rSTR)
+ add rTMP1, rFEFE, rWORD1
+ nor rTMP2, r7F7F, rWORD1
+ and. rTMP1, rTMP1, rTMP2
+ add rTMP3, rFEFE, rWORD2
+ nor rTMP4, r7F7F, rWORD2
+ bne L(done1)
+ and. rTMP1, rTMP3, rTMP4
+ beq L(loop)
+
+ and rTMP1, r7F7F, rWORD2
+ add rTMP1, rTMP1, r7F7F
+ andc rWORD1, rTMP4, rTMP1
+ b L(done0)
+
+L(done1):
+ and rTMP1, r7F7F, rWORD1
+ subi rSTR, rSTR, 4
+ add rTMP1, rTMP1, r7F7F
+ andc rWORD1, rTMP2, rTMP1
+
+/* When we get to here, rSTR points to the first word in the string that
+ contains a zero byte, and the most significant set bit in rWORD1 is in that
+ byte. */
+L(done0):
+ cntlzw rTMP3, rWORD1
+ subf rTMP1, rRTN, rSTR
+ srwi rTMP3, rTMP3, 3
+ add rRTN, rTMP1, rTMP3
+ /* GKM FIXME: check high bound. */
+ blr
+END (BP_SYM (strlen))
+libc_hidden_builtin_def (strlen)
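
[Editorial note, not part of the patch: the two word-at-a-time tests described in the algorithm comment at the top of strlen.S can be checked in plain C. The short program below is illustrative only; it hard-codes one example word and assumes a 32-bit unsigned int and the GCC __builtin_clz builtin.]

    #include <stdio.h>

    int
    main (void)
    {
      unsigned int x = 0x61006263;      /* bytes 'a', '\0', 'b', 'c' */

      /* Technique (1): nonzero iff some byte of x is zero.  */
      unsigned int has_zero = (x + 0xfefefeff) & ~(x | 0x7f7f7f7f);

      /* Technique (2): 0x80 in exactly the byte positions that were zero.  */
      unsigned int which = ~(((x & 0x7f7f7f7f) + 0x7f7f7f7f) | x | 0x7f7f7f7f);

      printf ("has_zero = %#010x\n", has_zero);                  /* 0x00800000 */
      printf ("which    = %#010x\n", which);                     /* 0x00800000 */
      printf ("zero byte index = %d\n", __builtin_clz (which) / 8);   /* 1 */
      return 0;
    }
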
diff --git a/libc/sysdeps/powerpc/powerpc32/strncmp.S b/libc/sysdeps/powerpc/powerpc32/strncmp.S
new file mode 100644
index 000000000..3e0fff5ac
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/strncmp.S
@@ -0,0 +1,161 @@
+/* Optimized strcmp implementation for PowerPC32.
+ Copyright (C) 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* See strlen.s for comments on how the end-of-string testing works. */
+
+/* int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t size [r5]) */
+
+EALIGN (BP_SYM(strncmp), 4, 0)
+
+#define rTMP r0
+#define rRTN r3
+#define rSTR1 r3 /* first string arg */
+#define rSTR2 r4 /* second string arg */
+#define rN r5 /* max string length */
+/* Note: The bounded pointer support in this code is broken.  This code
+   was inherited from PPC32 and that support was never completed.
+ Current PPC gcc does not support -fbounds-check or -fbounded-pointers. */
+#define rWORD1 r6 /* current word in s1 */
+#define rWORD2 r7 /* current word in s2 */
+#define rFEFE r8 /* constant 0xfefefeff (-0x01010101) */
+#define r7F7F r9 /* constant 0x7f7f7f7f */
+#define rNEG r10 /* ~(word in s1 | 0x7f7f7f7f) */
+#define rBITDIF r11 /* bits that differ in s1 & s2 words */
+
+ dcbt 0,rSTR1
+ or rTMP, rSTR2, rSTR1
+ lis r7F7F, 0x7f7f
+ dcbt 0,rSTR2
+ clrlwi. rTMP, rTMP, 30
+ cmplwi cr1, rN, 0
+ lis rFEFE, -0x101
+ bne L(unaligned)
+/* We are word aligned so set up for two loops: first a word
+   loop, then fall into the byte loop if there is any residual. */
+ srwi. rTMP, rN, 2
+ clrlwi rN, rN, 30
+ addi rFEFE, rFEFE, -0x101
+ addi r7F7F, r7F7F, 0x7f7f
+ cmplwi cr1, rN, 0
+ beq L(unaligned)
+
+ mtctr rTMP /* Power4 wants mtctr 1st in dispatch group. */
+ lwz rWORD1, 0(rSTR1)
+ lwz rWORD2, 0(rSTR2)
+ b L(g1)
+
+L(g0):
+ lwzu rWORD1, 4(rSTR1)
+ bne- cr1, L(different)
+ lwzu rWORD2, 4(rSTR2)
+L(g1): add rTMP, rFEFE, rWORD1
+ nor rNEG, r7F7F, rWORD1
+ bdz L(tail)
+ and. rTMP, rTMP, rNEG
+ cmpw cr1, rWORD1, rWORD2
+ beq+ L(g0)
+
+/* OK. We've hit the end of the string. We need to be careful that
+ we don't compare two strings as different because of gunk beyond
+   the end of the strings... */
+
+L(endstring):
+ and rTMP, r7F7F, rWORD1
+ beq cr1, L(equal)
+ add rTMP, rTMP, r7F7F
+ xor. rBITDIF, rWORD1, rWORD2
+
+ andc rNEG, rNEG, rTMP
+ blt- L(highbit)
+ cntlzw rBITDIF, rBITDIF
+ cntlzw rNEG, rNEG
+ addi rNEG, rNEG, 7
+ cmpw cr1, rNEG, rBITDIF
+ sub rRTN, rWORD1, rWORD2
+ blt- cr1, L(equal)
+ srawi rRTN, rRTN, 31
+ ori rRTN, rRTN, 1
+ blr
+L(equal):
+ li rRTN, 0
+ blr
+
+L(different):
+ lwzu rWORD1, -4(rSTR1)
+ xor. rBITDIF, rWORD1, rWORD2
+ sub rRTN, rWORD1, rWORD2
+ blt- L(highbit)
+ srawi rRTN, rRTN, 31
+ ori rRTN, rRTN, 1
+ blr
+L(highbit):
+ srwi rWORD2, rWORD2, 24
+ srwi rWORD1, rWORD1, 24
+ sub rRTN, rWORD1, rWORD2
+ blr
+
+
+/* Oh well. In this case, we just do a byte-by-byte comparison. */
+ .align 4
+L(tail):
+ and. rTMP, rTMP, rNEG
+ cmpw cr1, rWORD1, rWORD2
+ bne- L(endstring)
+ addi rSTR1, rSTR1, 4
+ bne- cr1, L(different)
+ addi rSTR2, rSTR2, 4
+ cmplwi cr1, rN, 0
+L(unaligned):
+ mtctr rN /* Power4 wants mtctr 1st in dispatch group */
+ bgt cr1, L(uz)
+L(ux):
+ li rRTN, 0
+ blr
+ .align 4
+L(uz):
+ lbz rWORD1, 0(rSTR1)
+ lbz rWORD2, 0(rSTR2)
+ nop
+ b L(u1)
+L(u0):
+ lbzu rWORD2, 1(rSTR2)
+L(u1):
+ bdz L(u3)
+ cmpwi cr1, rWORD1, 0
+ cmpw rWORD1, rWORD2
+ beq- cr1, L(u3)
+ lbzu rWORD1, 1(rSTR1)
+ bne- L(u2)
+ lbzu rWORD2, 1(rSTR2)
+ bdz L(u3)
+ cmpwi cr1, rWORD1, 0
+ cmpw rWORD1, rWORD2
+ bne- L(u3)
+ lbzu rWORD1, 1(rSTR1)
+ bne+ cr1, L(u0)
+
+L(u2): lbzu rWORD1, -1(rSTR1)
+L(u3): sub rRTN, rWORD1, rWORD2
+ blr
+END (BP_SYM (strncmp))
+libc_hidden_builtin_def (strncmp)
diff --git a/libc/sysdeps/powerpc/powerpc32/sub_n.S b/libc/sysdeps/powerpc/powerpc32/sub_n.S
new file mode 100644
index 000000000..3ebd22e30
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/sub_n.S
@@ -0,0 +1,78 @@
+/* Subtract two limb vectors of equal, non-zero length for PowerPC.
+ Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* mp_limb_t mpn_sub_n (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_srcptr s2_ptr,
+ mp_size_t size)
+ Calculate s1-s2 and put result in res_ptr; return borrow, 0 or 1. */
+
+/* Note on optimisation: This code is optimal for the 601. Almost every other
+ possible 2-unrolled inner loop will not be. Also, watch out for the
+ alignment... */
+
+EALIGN (BP_SYM (__mpn_sub_n), 3, 1)
+
+#if __BOUNDED_POINTERS__
+ slwi r10,r6,2 /* convert limbs to bytes */
+ CHECK_BOUNDS_BOTH_WIDE (r3, r8, r9, r10)
+ CHECK_BOUNDS_BOTH_WIDE (r4, r8, r9, r10)
+ CHECK_BOUNDS_BOTH_WIDE (r5, r8, r9, r10)
+#endif
+
+/* Set up for loop below. */
+ mtcrf 0x01,r6
+ srwi. r7,r6,1
+ mtctr r7
+ bt 31,L(2)
+
+/* Set the carry (clear the borrow). */
+ subfc r0,r0,r0
+/* Adjust pointers for loop. */
+ addi r3,r3,-4
+ addi r4,r4,-4
+ addi r5,r5,-4
+ b L(0)
+
+L(2): lwz r7,0(r5)
+ lwz r6,0(r4)
+ subfc r6,r7,r6
+ stw r6,0(r3)
+ beq L(1)
+
+/* Align start of loop to an odd word boundary to guarantee that the
+ last two words can be fetched in one access (for 601). This turns
+ out to be important. */
+L(0):
+ lwz r9,4(r4)
+ lwz r8,4(r5)
+ lwzu r6,8(r4)
+ lwzu r7,8(r5)
+ subfe r8,r8,r9
+ stw r8,4(r3)
+ subfe r6,r7,r6
+ stwu r6,8(r3)
+ bdnz L(0)
+/* Return the borrow. */
+L(1): subfe r3,r3,r3
+ neg r3,r3
+ blr
+END (BP_SYM (__mpn_sub_n))
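
For reference, the operation described in the header comment (calculate s1-s2
limb by limb and return the final borrow) can be written as the portable C
sketch below.  The unrolled loop above computes the same thing while carrying
the borrow in the processor's carry bit; the sketch assumes the mpn typedefs
from <gmp.h>, uses a made-up name, and is illustrative rather than the
installed implementation.

    #include <gmp.h>   /* mp_limb_t, mp_ptr, mp_srcptr, mp_size_t */

    /* res[i] = s1[i] - s2[i] - borrow-in; return the final borrow (0 or 1).  */
    mp_limb_t
    mpn_sub_n_sketch (mp_ptr res, mp_srcptr s1, mp_srcptr s2, mp_size_t size)
    {
      mp_limb_t borrow = 0;
      for (mp_size_t i = 0; i < size; i++)
        {
          mp_limb_t a = s1[i], b = s2[i];
          mp_limb_t d = a - b - borrow;
          /* A borrow propagates if b exceeded a, or if they were equal
             while a borrow was already pending.  */
          borrow = (a < b) || (a - b < borrow);
          res[i] = d;
        }
      return borrow;
    }
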
diff --git a/libc/sysdeps/powerpc/powerpc32/submul_1.S b/libc/sysdeps/powerpc/powerpc32/submul_1.S
new file mode 100644
index 000000000..6e45d1983
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/submul_1.S
@@ -0,0 +1,59 @@
+/* Multiply a limb vector by a single limb and subtract the products
+   from a second limb vector, for PowerPC.
+ Copyright (C) 1993-1995, 1997, 1999, 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* mp_limb_t mpn_submul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
+ mp_size_t s1_size, mp_limb_t s2_limb)
+   Calculate res-s1*s2 and put the result back in res; return the borrow,
+   i.e. the high limb of the product plus any borrow from the subtraction. */
+
+ENTRY (BP_SYM (__mpn_submul_1))
+#if __BOUNDED_POINTERS__
+ slwi r10,r5,2 /* convert limbs to bytes */
+ CHECK_BOUNDS_BOTH_WIDE (r3, r8, r9, r10)
+ CHECK_BOUNDS_BOTH_WIDE (r4, r8, r9, r10)
+#endif
+ mtctr r5
+
+ lwz r0,0(r4)
+ mullw r7,r0,r6
+ mulhwu r10,r0,r6
+ lwz r9,0(r3)
+ subf r8,r7,r9
+ addc r7,r7,r8 # invert cy (r7 is junk)
+ addi r3,r3,-4 # adjust res_ptr
+ bdz L(1)
+
+L(0): lwzu r0,4(r4)
+ stwu r8,4(r3)
+ mullw r8,r0,r6
+ adde r7,r8,r10
+ mulhwu r10,r0,r6
+ lwz r9,4(r3)
+ addze r10,r10
+ subf r8,r7,r9
+ addc r7,r7,r8 # invert cy (r7 is junk)
+ bdnz L(0)
+
+L(1): stw r8,4(r3)
+ addze r3,r10
+ blr
+END (BP_SYM (__mpn_submul_1))
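
Again for reference only: a portable C sketch of what __mpn_submul_1 computes.
Each limb of s1 is multiplied by s2_limb, the product is subtracted from the
corresponding limb of res, and the high halves of the products together with
the borrows accumulate into the returned value.  The loop above folds all of
this into the carry chain (addc/adde/addze); the sketch instead uses a
double-width temporary, assumes 32-bit limbs as on powerpc32, and its name is
made up.

    #include <stdint.h>

    /* {res, size} -= {s1, size} * s2_limb; return what falls off the top.  */
    uint32_t
    mpn_submul_1_sketch (uint32_t *res, const uint32_t *s1,
                         long size, uint32_t s2_limb)
    {
      uint32_t carry = 0;   /* high product limb plus pending borrow */
      for (long i = 0; i < size; i++)
        {
          uint64_t prod = (uint64_t) s1[i] * s2_limb + carry;
          uint32_t lo = (uint32_t) prod;
          uint32_t r = res[i];
          res[i] = r - lo;
          /* The borrow from the subtraction joins the product's high half.  */
          carry = (uint32_t) (prod >> 32) + (r < lo);
        }
      return carry;
    }
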
diff --git a/libc/sysdeps/powerpc/powerpc32/sysdep.h b/libc/sysdeps/powerpc/powerpc32/sysdep.h
new file mode 100644
index 000000000..8fc624ebd
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/sysdep.h
@@ -0,0 +1,153 @@
+/* Assembly macros for 32-bit PowerPC.
+ Copyright (C) 1999, 2001, 2002, 2003, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdeps/powerpc/sysdep.h>
+
+#ifdef __ASSEMBLER__
+
+#ifdef __ELF__
+
+/* If compiled for profiling, call `_mcount' at the start of each
+ function. */
+#ifdef PROF
+/* The mcount code relies on the return address being on the stack
+   so that it can locate our caller and restore it; store one there
+   just for its benefit. */
+# define CALL_MCOUNT \
+ mflr r0; \
+ stw r0,4(r1); \
+ cfi_offset (lr, 4); \
+ bl JUMPTARGET(_mcount);
+#else /* PROF */
+# define CALL_MCOUNT /* Do nothing. */
+#endif /* PROF */
+
+#define ENTRY(name) \
+ ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \
+ ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \
+ .align ALIGNARG(2); \
+ C_LABEL(name) \
+ cfi_startproc; \
+ CALL_MCOUNT
+
+#define EALIGN_W_0 /* No words to insert. */
+#define EALIGN_W_1 nop
+#define EALIGN_W_2 nop;nop
+#define EALIGN_W_3 nop;nop;nop
+#define EALIGN_W_4 EALIGN_W_3;nop
+#define EALIGN_W_5 EALIGN_W_4;nop
+#define EALIGN_W_6 EALIGN_W_5;nop
+#define EALIGN_W_7 EALIGN_W_6;nop
+
+/* EALIGN is like ENTRY, but does alignment to 'words'*4 bytes
+   past a 2^alignt boundary. */
+#ifdef PROF
+# define EALIGN(name, alignt, words) \
+ ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \
+ ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \
+ .align ALIGNARG(2); \
+ C_LABEL(name) \
+ cfi_startproc; \
+ CALL_MCOUNT \
+ b 0f; \
+ .align ALIGNARG(alignt); \
+ EALIGN_W_##words; \
+ 0:
+#else /* PROF */
+# define EALIGN(name, alignt, words) \
+ ASM_GLOBAL_DIRECTIVE C_SYMBOL_NAME(name); \
+ ASM_TYPE_DIRECTIVE (C_SYMBOL_NAME(name),@function) \
+ .align ALIGNARG(alignt); \
+ EALIGN_W_##words; \
+ C_LABEL(name) \
+ cfi_startproc;
+#endif
+
+#undef END
+#define END(name) \
+ cfi_endproc; \
+ ASM_SIZE_DIRECTIVE(name)
+
+#define DO_CALL(syscall) \
+ li 0,syscall; \
+ sc
+
+#undef JUMPTARGET
+#ifdef PIC
+# define JUMPTARGET(name) name##@plt
+#else
+# define JUMPTARGET(name) name
+#endif
+
+#if defined SHARED && defined DO_VERSIONING && defined PIC \
+ && !defined HAVE_BROKEN_ALIAS_ATTRIBUTE && !defined NO_HIDDEN
+# undef HIDDEN_JUMPTARGET
+# define HIDDEN_JUMPTARGET(name) __GI_##name##@local
+#endif
+
+#define PSEUDO(name, syscall_name, args) \
+ .section ".text"; \
+ ENTRY (name) \
+ DO_CALL (SYS_ify (syscall_name));
+
+#define PSEUDO_RET \
+ bnslr+; \
+ b __syscall_error@local
+#define ret PSEUDO_RET
+
+#undef PSEUDO_END
+#define PSEUDO_END(name) \
+ END (name)
+
+#define PSEUDO_NOERRNO(name, syscall_name, args) \
+ .section ".text"; \
+ ENTRY (name) \
+ DO_CALL (SYS_ify (syscall_name));
+
+#define PSEUDO_RET_NOERRNO \
+ blr
+#define ret_NOERRNO PSEUDO_RET_NOERRNO
+
+#undef PSEUDO_END_NOERRNO
+#define PSEUDO_END_NOERRNO(name) \
+ END (name)
+
+#define PSEUDO_ERRVAL(name, syscall_name, args) \
+ .section ".text"; \
+ ENTRY (name) \
+ DO_CALL (SYS_ify (syscall_name));
+
+#define PSEUDO_RET_ERRVAL \
+ blr
+#define ret_ERRVAL PSEUDO_RET_ERRVAL
+
+#undef PSEUDO_END_ERRVAL
+#define PSEUDO_END_ERRVAL(name) \
+ END (name)
+
+/* Local labels that are not kept in the final symbol table. */
+#undef L
+#define L(x) .L##x
+
+/* Label in text section. */
+#define C_TEXT(name) name
+
+#endif /* __ELF__ */
+
+#endif /* __ASSEMBLER__ */
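
To see how these macros fit together: a minimal syscall stub written against
this header as

    PSEUDO (__my_stub, getpid, 0)
    ret
    PSEUDO_END (__my_stub)

(the name __my_stub is made up, and getpid merely stands in for any syscall)
expands, through ENTRY, DO_CALL and PSEUDO_RET above, to roughly

    li   0, SYS_ify (getpid)    /* syscall number goes in r0 */
    sc                          /* enter the kernel */
    bnslr+                      /* return unless CR0.SO signals an error */
    b    __syscall_error@local  /* otherwise take the error path */

plus the section, label, type, size and CFI bookkeeping that ENTRY and END
emit.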