author     joseph <joseph@7b3dc134-2b1b-0410-93df-9e9f96275f8d>  2013-09-05 22:41:05 +0000
committer  joseph <joseph@7b3dc134-2b1b-0410-93df-9e9f96275f8d>  2013-09-05 22:41:05 +0000
commit     2c1d357a5fe5bdc4c9521a9d1af05ba1b5a4c238 (patch)
tree       12e23fa555f1432064e8fdeeb063dce798764f50 /libc/sysdeps/powerpc
parent     0d983ceeeee6bd20ae296d224ca2fd4bf27a47af (diff)
download   eglibc2-2c1d357a5fe5bdc4c9521a9d1af05ba1b5a4c238.tar.gz
Merge changes between r23795 and r23927 from /fsf/trunk.
git-svn-id: svn://svn.eglibc.org/trunk@23928 7b3dc134-2b1b-0410-93df-9e9f96275f8d
Diffstat (limited to 'libc/sysdeps/powerpc')
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/backtrace.c      | 66
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/cell/memcpy.S    |  2
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/dl-machine.c     |  7
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/fpu/setjmp.S     |  2
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/power4/memcmp.S  |  2
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/power6/memcpy.S  | 18
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/power6/memset.S  |  4
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/power7/memcmp.S  |  2
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/setjmp.S         |  2
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/sysdep.h         |  3
-rw-r--r--  libc/sysdeps/powerpc/powerpc64/backtrace.c      | 36
-rw-r--r--  libc/sysdeps/powerpc/powerpc64/cell/memcpy.S    |  2
-rw-r--r--  libc/sysdeps/powerpc/powerpc64/power6/memcpy.S  | 10
-rw-r--r--  libc/sysdeps/powerpc/powerpc64/power6/memset.S  |  4
14 files changed, 125 insertions, 35 deletions
diff --git a/libc/sysdeps/powerpc/powerpc32/backtrace.c b/libc/sysdeps/powerpc/powerpc32/backtrace.c
index b4b11dd03..8d413e620 100644
--- a/libc/sysdeps/powerpc/powerpc32/backtrace.c
+++ b/libc/sysdeps/powerpc/powerpc32/backtrace.c
@@ -18,6 +18,9 @@
#include <execinfo.h>
#include <stddef.h>
+#include <string.h>
+#include <signal.h>
+#include <bits/libc-vdso.h>
/* This is the stack layout we see with every stack frame.
Note that every routine is required by the ABI to lay out the stack
@@ -35,6 +38,46 @@ struct layout
void *return_address;
};
+#define SIGNAL_FRAMESIZE 64
+
+/* Since the signal handler is just like any other function it needs to
+ save/restore its LR and it will save it into callers stack frame.
+ Since a signal handler doesn't have a caller, the kernel creates a
+ dummy frame to make it look like it has a caller. */
+struct signal_frame_32 {
+ char dummy[SIGNAL_FRAMESIZE];
+ struct sigcontext sctx;
+ mcontext_t mctx;
+ /* We don't care about the rest, since IP value is at 'mctx' field. */
+};
+
+static inline int
+is_sigtramp_address (unsigned int nip)
+{
+#ifdef SHARED
+ if (nip == (unsigned int)__vdso_sigtramp32)
+ return 1;
+#endif
+ return 0;
+}
+
+struct rt_signal_frame_32 {
+ char dummy[SIGNAL_FRAMESIZE + 16];
+ siginfo_t info;
+ struct ucontext uc;
+ /* We don't care about the rest, since IP value is at 'uc' field. */
+};
+
+static inline int
+is_sigtramp_address_rt (unsigned int nip)
+{
+#ifdef SHARED
+ if (nip == (unsigned int)__vdso_sigtramp_rt32)
+ return 1;
+#endif
+ return 0;
+}
+
int
__backtrace (void **array, int size)
{
@@ -50,7 +93,28 @@ __backtrace (void **array, int size)
for ( count = 0;
current != NULL && count < size;
current = current->next, count++)
- array[count] = current->return_address;
+ {
+ gregset_t *gregset = NULL;
+
+ array[count] = current->return_address;
+
+ /* Check if the symbol is the signal trampoline and get the interrupted
+ * symbol address from the trampoline saved area. */
+ if (is_sigtramp_address ((unsigned int)current->return_address))
+ {
+ struct signal_frame_32 *sigframe =
+ (struct signal_frame_32*) current;
+ gregset = &sigframe->mctx.gregs;
+ }
+ else if (is_sigtramp_address_rt ((unsigned int)current->return_address))
+ {
+ struct rt_signal_frame_32 *sigframe =
+ (struct rt_signal_frame_32*) current;
+ gregset = &sigframe->uc.uc_mcontext.uc_regs->gregs;
+ }
+ if (gregset)
+ array[++count] = (void*)((*gregset)[PT_NIP]);
+ }
/* It's possible the second-last stack frame can't return
(that is, it's __libc_start_main), in which case
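
[Editor's sketch, not part of the patch.] The hunk above makes the PowerPC32 backtracer recognize the two vDSO signal trampolines and, when it sees one, pull the interrupted program counter (NIP) out of the register set the kernel saved in the dummy signal frame. The following minimal C sketch shows the same frame-walking idea; walk_frames, is_trampoline and interrupted_pc are illustrative names only, and the real code uses the signal_frame_32/rt_signal_frame_32 layouts shown in the diff.

    #include <stddef.h>

    struct frame
    {
      struct frame *next;      /* back chain to the caller's frame */
      void *return_address;    /* saved LR / return address */
    };

    static int
    is_trampoline (void *ra)
    {
      /* Placeholder: the real code compares against the vDSO trampoline
         addresses (__vdso_sigtramp32 / __vdso_sigtramp_rt32).  */
      (void) ra;
      return 0;
    }

    static void *
    interrupted_pc (struct frame *f)
    {
      /* Placeholder: the real code reads PT_NIP from the mcontext stored
         in the kernel-built signal frame that follows 'f' on the stack.  */
      (void) f;
      return NULL;
    }

    static int
    walk_frames (struct frame *current, void **array, int size)
    {
      int count;

      for (count = 0; current != NULL && count < size;
           current = current->next, count++)
        {
          array[count] = current->return_address;

          /* If the return address is the signal trampoline, the dummy frame
             built by the kernel holds the interrupted context; report that
             PC as an extra entry while there is still room.  */
          if (is_trampoline (current->return_address) && count + 1 < size)
            array[++count] = interrupted_pc (current);
        }
      return count;
    }
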
diff --git a/libc/sysdeps/powerpc/powerpc32/cell/memcpy.S b/libc/sysdeps/powerpc/powerpc32/cell/memcpy.S
index f3605d790..24a0f1248 100644
--- a/libc/sysdeps/powerpc/powerpc32/cell/memcpy.S
+++ b/libc/sysdeps/powerpc/powerpc32/cell/memcpy.S
@@ -49,7 +49,7 @@ EALIGN (memcpy, 5, 0)
.Lbigcopy:
neg r8,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */
- clrlwi r8,r8,32-4 /* aling to 16byte boundary */
+ clrlwi r8,r8,32-4 /* align to 16byte boundary */
sub r7,r4,r3
cmplwi cr0,r8,0
beq+ .Ldst_aligned
diff --git a/libc/sysdeps/powerpc/powerpc32/dl-machine.c b/libc/sysdeps/powerpc/powerpc32/dl-machine.c
index 188f72cdb..3e7202d86 100644
--- a/libc/sysdeps/powerpc/powerpc32/dl-machine.c
+++ b/libc/sysdeps/powerpc/powerpc32/dl-machine.c
@@ -29,13 +29,6 @@
by _dl_sysdep_start via DL_PLATFORM_INIT. */
extern int __cache_line_size attribute_hidden;
-/* Because ld.so is now versioned, these functions can be in their own file;
- no relocations need to be done to call them.
- Of course, if ld.so is not versioned... */
-#if defined SHARED && !(DO_VERSIONING - 0)
-#error This will not work with versioning turned off, sorry.
-#endif
-
/* Stuff for the PLT. */
#define PLT_INITIAL_ENTRY_WORDS 18
diff --git a/libc/sysdeps/powerpc/powerpc32/fpu/setjmp.S b/libc/sysdeps/powerpc/powerpc32/fpu/setjmp.S
index dc93db385..60cd35052 100644
--- a/libc/sysdeps/powerpc/powerpc32/fpu/setjmp.S
+++ b/libc/sysdeps/powerpc/powerpc32/fpu/setjmp.S
@@ -1,4 +1,4 @@
-/* non alitivec (old) version of setjmp for PowerPC.
+/* non altivec (old) version of setjmp for PowerPC.
Copyright (C) 1995-2013 Free Software Foundation, Inc.
This file is part of the GNU C Library.
diff --git a/libc/sysdeps/powerpc/powerpc32/power4/memcmp.S b/libc/sysdeps/powerpc/powerpc32/power4/memcmp.S
index d7050a2f7..9a455a3c6 100644
--- a/libc/sysdeps/powerpc/powerpc32/power4/memcmp.S
+++ b/libc/sysdeps/powerpc/powerpc32/power4/memcmp.S
@@ -512,7 +512,7 @@ L(zeroLength):
of rBITDIF to 0. If rBITDIF == 0 then rStr1 is word aligned and can
perform the Wunaligned loop.
- Otherwise we know that rSTR1 is not aready word aligned yet.
+ Otherwise we know that rSTR1 is not already word aligned yet.
So we can force the string addresses to the next lower word
boundary and special case this first word using shift left to
eliminate bits preceding the first byte. Since we want to join the
diff --git a/libc/sysdeps/powerpc/powerpc32/power6/memcpy.S b/libc/sysdeps/powerpc/powerpc32/power6/memcpy.S
index c3d55b768..a76f71e04 100644
--- a/libc/sysdeps/powerpc/powerpc32/power6/memcpy.S
+++ b/libc/sysdeps/powerpc/powerpc32/power6/memcpy.S
@@ -269,7 +269,7 @@ L(wus_tail16): /* Move 16 bytes. */
addi 1,1,32
blr
.align 4
-L(wus_tail16p8): /* less then 8 bytes left. */
+L(wus_tail16p8): /* less than 8 bytes left. */
beq cr1,L(wus_tailX) /* exactly 16 bytes, early exit. */
cmplwi cr1,10,20
bf 29,L(wus_tail16p2)
@@ -283,7 +283,7 @@ L(wus_tail16p8): /* less then 8 bytes left. */
addi 1,1,32
blr
.align 4
-L(wus_tail16p4): /* less then 4 bytes left. */
+L(wus_tail16p4): /* less than 4 bytes left. */
addi 12,12,24
addi 11,11,24
bgt cr0,L(wus_tail2)
@@ -291,7 +291,7 @@ L(wus_tail16p4): /* less then 4 bytes left. */
addi 1,1,32
blr
.align 4
-L(wus_tail16p2): /* 16 bytes moved, less then 4 bytes left. */
+L(wus_tail16p2): /* 16 bytes moved, less than 4 bytes left. */
addi 12,12,16
addi 11,11,16
b L(wus_tail2)
@@ -315,7 +315,7 @@ L(wus_tail8): /* Move 8 bytes. */
addi 1,1,32
blr
.align 4
-L(wus_tail8p4): /* less then 4 bytes left. */
+L(wus_tail8p4): /* less than 4 bytes left. */
addi 12,12,8
addi 11,11,8
bgt cr1,L(wus_tail2)
@@ -326,7 +326,7 @@ L(wus_tail8p4): /* less then 4 bytes left. */
.align 4
L(wus_tail4): /* Move 4 bytes. */
/* r6 already loaded speculatively. If we are here we know there is
- more then 4 bytes left. So there is no need to test. */
+ more than 4 bytes left. So there is no need to test. */
addi 12,12,4
stw 6,0(11)
addi 11,11,4
@@ -426,14 +426,14 @@ L(wdu):
First we need to copy word up to but not crossing the next 32-byte
boundary. Then perform aligned loads just before and just after
the boundary and use shifts and or to generate the next aligned
- word for dst. If more then 32 bytes remain we copy (unaligned src)
- the next 7 words and repeat the loop until less then 32-bytes
+ word for dst. If more than 32 bytes remain we copy (unaligned src)
+ the next 7 words and repeat the loop until less than 32-bytes
remain.
- Then if more then 4 bytes remain we again use aligned loads,
+ Then if more than 4 bytes remain we again use aligned loads,
shifts and or to generate the next dst word. We then process the
remaining words using unaligned loads as needed. Finally we check
- if there more then 0 bytes (1-3) bytes remaining and use
+ if there are more than 0 bytes (1-3) bytes remaining and use
halfword and or byte load/stores to complete the copy.
*/
mr 4,12 /* restore unaligned adjusted src ptr */
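
[Editor's sketch, not part of the patch.] The comment block updated in this last hunk describes the technique used when the source is not word aligned: do aligned word loads on either side of the misalignment and combine the two halves with shifts and an OR to form each aligned destination word. A rough C rendering of that idea follows, assuming big-endian byte order as on PowerPC; the names and the simple word-at-a-time loop are illustrative, whereas the real power6 code does this in assembly with 32-byte unrolling and dedicated tail handling.

    #include <stdint.h>
    #include <stddef.h>

    /* Copy nwords 32-bit words to a word-aligned dst from a possibly
       unaligned src, using only aligned loads from src.  */
    static void
    copy_words_unaligned_src (uint32_t *dst, const unsigned char *src,
                              size_t nwords)
    {
      uintptr_t misalign = (uintptr_t) src & 3;
      const uint32_t *asrc = (const uint32_t *) (src - misalign);

      if (misalign == 0)
        {
          /* Source already word aligned: plain word copy.  */
          while (nwords--)
            *dst++ = *asrc++;
          return;
        }

      unsigned int lshift = misalign * 8;    /* bits to drop from 1st word */
      unsigned int rshift = 32 - lshift;     /* bits to take from 2nd word */
      uint32_t hi = *asrc++;

      while (nwords--)
        {
          uint32_t lo = *asrc++;
          /* Big-endian: the low-address bytes are the most significant,
             so the result is the tail of 'hi' followed by the head of 'lo'.  */
          *dst++ = (hi << lshift) | (lo >> rshift);
          hi = lo;
        }
      /* The final aligned load stays inside the word holding the last source
         bytes, so it cannot stray onto a new page.  */
    }
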
diff --git a/libc/sysdeps/powerpc/powerpc32/power6/memset.S b/libc/sysdeps/powerpc/powerpc32/power6/memset.S
index ce0663001..8c23c8d13 100644
--- a/libc/sysdeps/powerpc/powerpc32/power6/memset.S
+++ b/libc/sysdeps/powerpc/powerpc32/power6/memset.S
@@ -101,7 +101,7 @@ L(nondcbz):
boundary may not be at cache line (128-byte) boundary. */
L(nzloopstart):
/* memset in 32-byte chunks until we get to a cache line boundary.
- If rLEN is less then the distance to the next cache-line boundary use
+ If rLEN is less than the distance to the next cache-line boundary use
cacheAligned1 code to finish the tail. */
cmplwi cr1,rLEN,128
@@ -306,7 +306,7 @@ L(nzCacheAligned256):
block zero instruction. */
L(zloopstart):
/* memset in 32-byte chunks until we get to a cache line boundary.
- If rLEN is less then the distance to the next cache-line boundary use
+ If rLEN is less than the distance to the next cache-line boundary use
cacheAligned1 code to finish the tail. */
cmplwi cr1,rLEN,128
beq L(medium)
diff --git a/libc/sysdeps/powerpc/powerpc32/power7/memcmp.S b/libc/sysdeps/powerpc/powerpc32/power7/memcmp.S
index f764b7ce3..075e19f14 100644
--- a/libc/sysdeps/powerpc/powerpc32/power7/memcmp.S
+++ b/libc/sysdeps/powerpc/powerpc32/power7/memcmp.S
@@ -515,7 +515,7 @@ L(zeroLength):
of rBITDIF to 0. If rBITDIF == 0 then rStr1 is word aligned and can
perform the Wunaligned loop.
- Otherwise we know that rSTR1 is not aready word aligned yet.
+ Otherwise we know that rSTR1 is not already word aligned yet.
So we can force the string addresses to the next lower word
boundary and special case this first word using shift left to
eliminate bits preceding the first byte. Since we want to join the
diff --git a/libc/sysdeps/powerpc/powerpc32/setjmp.S b/libc/sysdeps/powerpc/powerpc32/setjmp.S
index 851a5b908..8a8cf0d6e 100644
--- a/libc/sysdeps/powerpc/powerpc32/setjmp.S
+++ b/libc/sysdeps/powerpc/powerpc32/setjmp.S
@@ -1,4 +1,4 @@
-/* non alitivec (old) version of setjmp for PowerPC.
+/* non altivec (old) version of setjmp for PowerPC.
Copyright (C) 1995-2013 Free Software Foundation, Inc.
This file is part of the GNU C Library.
diff --git a/libc/sysdeps/powerpc/powerpc32/sysdep.h b/libc/sysdeps/powerpc/powerpc32/sysdep.h
index 47d02a804..78f54f91c 100644
--- a/libc/sysdeps/powerpc/powerpc32/sysdep.h
+++ b/libc/sysdeps/powerpc/powerpc32/sysdep.h
@@ -99,8 +99,7 @@ GOT_LABEL: ; \
# define JUMPTARGET(name) name
#endif
-#if defined SHARED && defined DO_VERSIONING && defined PIC \
- && !defined NO_HIDDEN
+#if defined SHARED && defined PIC && !defined NO_HIDDEN
# undef HIDDEN_JUMPTARGET
# define HIDDEN_JUMPTARGET(name) __GI_##name##@local
#endif
diff --git a/libc/sysdeps/powerpc/powerpc64/backtrace.c b/libc/sysdeps/powerpc/powerpc64/backtrace.c
index 2d3e051cb..9b9a9f19d 100644
--- a/libc/sysdeps/powerpc/powerpc64/backtrace.c
+++ b/libc/sysdeps/powerpc/powerpc64/backtrace.c
@@ -18,6 +18,9 @@
#include <execinfo.h>
#include <stddef.h>
+#include <string.h>
+#include <signal.h>
+#include <bits/libc-vdso.h>
/* This is the stack layout we see with every stack frame.
Note that every routine is required by the ABI to lay out the stack
@@ -38,6 +41,27 @@ struct layout
void *return_address;
};
+/* Since the signal handler is just like any other function it needs to
+ save/restore its LR and it will save it into callers stack frame.
+ Since a signal handler doesn't have a caller, the kernel creates a
+ dummy frame to make it look like it has a caller. */
+struct signal_frame_64 {
+#define SIGNAL_FRAMESIZE 128
+ char dummy[SIGNAL_FRAMESIZE];
+ struct ucontext uc;
+ /* We don't care about the rest, since the IP value is at 'uc' field. */
+};
+
+static inline int
+is_sigtramp_address (unsigned long nip)
+{
+#ifdef SHARED
+ if (nip == (unsigned long)__vdso_sigtramp_rt64)
+ return 1;
+#endif
+ return 0;
+}
+
int
__backtrace (void **array, int size)
{
@@ -53,7 +77,17 @@ __backtrace (void **array, int size)
for ( count = 0;
current != NULL && count < size;
current = current->next, count++)
- array[count] = current->return_address;
+ {
+ array[count] = current->return_address;
+
+ /* Check if the symbol is the signal trampoline and get the interrupted
+ * symbol address from the trampoline saved area. */
+ if (is_sigtramp_address ((unsigned long)current->return_address))
+ {
+ struct signal_frame_64 *sigframe = (struct signal_frame_64*) current;
+ array[++count] = (void*)sigframe->uc.uc_mcontext.gp_regs[PT_NIP];
+ }
+ }
/* It's possible the second-last stack frame can't return
(that is, it's __libc_start_main), in which case
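
[Editor's sketch, not part of the patch.] The 64-bit backtracer gets the same treatment using the single rt trampoline __vdso_sigtramp_rt64. The practical effect is that a backtrace taken inside a signal handler now also reports the address that was executing when the signal arrived, immediately after the trampoline's entry. A hedged usage example that exercises this from a SIGSEGV handler:

    #include <execinfo.h>
    #include <signal.h>
    #include <unistd.h>

    #define MAX_FRAMES 64

    static void
    crash_handler (int sig)
    {
      void *frames[MAX_FRAMES];
      int n = backtrace (frames, MAX_FRAMES);

      /* backtrace_symbols_fd writes straight to the descriptor without
         calling malloc, which keeps this demo reasonable in a handler.  */
      backtrace_symbols_fd (frames, n, STDERR_FILENO);
      _exit (1);
    }

    int
    main (void)
    {
      signal (SIGSEGV, crash_handler);
      *(volatile int *) 0 = 0;   /* fault on purpose to enter the handler */
      return 0;
    }

Note that backtrace itself may allocate on first use, so production code would typically call it once during startup before installing the handler.
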
diff --git a/libc/sysdeps/powerpc/powerpc64/cell/memcpy.S b/libc/sysdeps/powerpc/powerpc64/cell/memcpy.S
index a271965dd..104900ea5 100644
--- a/libc/sysdeps/powerpc/powerpc64/cell/memcpy.S
+++ b/libc/sysdeps/powerpc/powerpc64/cell/memcpy.S
@@ -49,7 +49,7 @@ EALIGN (memcpy, 5, 0)
.Lbigcopy:
neg r8,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */
- clrldi r8,r8,64-4 /* aling to 16byte boundary */
+ clrldi r8,r8,64-4 /* align to 16byte boundary */
sub r7,r4,r3
cmpldi cr0,r8,0
beq+ .Ldst_aligned
diff --git a/libc/sysdeps/powerpc/powerpc64/power6/memcpy.S b/libc/sysdeps/powerpc/powerpc64/power6/memcpy.S
index db29e2b06..d6d242d29 100644
--- a/libc/sysdeps/powerpc/powerpc64/power6/memcpy.S
+++ b/libc/sysdeps/powerpc/powerpc64/power6/memcpy.S
@@ -450,7 +450,7 @@ L(dus_tail16): /* Move 16 bytes. */
ld 3,-16(1)
blr
.align 4
-L(dus_tail16p8): /* less then 8 bytes left. */
+L(dus_tail16p8): /* less than 8 bytes left. */
beq cr1,L(dus_tailX) /* exactly 16 bytes, early exit. */
cmpldi cr1,10,20
bf 29,L(dus_tail16p2)
@@ -464,7 +464,7 @@ L(dus_tail16p8): /* less then 8 bytes left. */
ld 3,-16(1)
blr
.align 4
-L(dus_tail16p4): /* less then 4 bytes left. */
+L(dus_tail16p4): /* less than 4 bytes left. */
addi 12,12,24
addi 3,3,24
bgt cr0,L(dus_tail2)
@@ -472,7 +472,7 @@ L(dus_tail16p4): /* less then 4 bytes left. */
ld 3,-16(1)
blr
.align 4
-L(dus_tail16p2): /* 16 bytes moved, less then 4 bytes left. */
+L(dus_tail16p2): /* 16 bytes moved, less than 4 bytes left. */
addi 12,12,16
addi 3,3,16
b L(dus_tail2)
@@ -497,7 +497,7 @@ L(dus_tail8): /* Move 8 bytes. */
ld 3,-16(1)
blr
.align 4
-L(dus_tail8p4): /* less then 4 bytes left. */
+L(dus_tail8p4): /* less than 4 bytes left. */
addi 12,12,8
addi 3,3,8
bgt cr1,L(dus_tail2)
@@ -508,7 +508,7 @@ L(dus_tail8p4): /* less then 4 bytes left. */
.align 4
L(dus_tail4): /* Move 4 bytes. */
/* r6 already loaded speculatively. If we are here we know there is
- more then 4 bytes left. So there is no need to test. */
+ more than 4 bytes left. So there is no need to test. */
addi 12,12,4
stw 6,0(3)
addi 3,3,4
diff --git a/libc/sysdeps/powerpc/powerpc64/power6/memset.S b/libc/sysdeps/powerpc/powerpc64/power6/memset.S
index 541a45fd3..3e8ae2d25 100644
--- a/libc/sysdeps/powerpc/powerpc64/power6/memset.S
+++ b/libc/sysdeps/powerpc/powerpc64/power6/memset.S
@@ -110,7 +110,7 @@ L(caligned):
boundary may not be at cache line (128-byte) boundary. */
L(nzloopstart):
/* memset in 32-byte chunks until we get to a cache line boundary.
- If rLEN is less then the distance to the next cache-line boundary use
+ If rLEN is less than the distance to the next cache-line boundary use
cacheAligned1 code to finish the tail. */
cmpldi cr1,rLEN,128
@@ -186,7 +186,7 @@ L(nzCacheAligned128):
block zero instruction. */
L(zloopstart):
/* memset in 32-byte chunks until we get to a cache line boundary.
- If rLEN is less then the distance to the next cache-line boundary use
+ If rLEN is less than the distance to the next cache-line boundary use
cacheAligned1 code to finish the tail. */
cmpldi cr1,rLEN,128
beq L(medium)