diff options
author | Jan Kratochvil <jan.kratochvil@redhat.com> | 2010-02-17 11:37:23 +0000 |
---|---|---|
committer | Jan Kratochvil <jan.kratochvil@redhat.com> | 2010-02-17 11:37:23 +0000 |
commit | ef5d914ca7b0ff7b3118a4afa95ef0a9c96c72ed (patch) | |
tree | d7fc147dcfa4e02d948499fbbafc9d5958947fb3 /gdb/solib-svr4.c | |
parent | 70f7b24844aa36c3609628187e4cf8b961ad0637 (diff) | |
download | gdb-ef5d914ca7b0ff7b3118a4afa95ef0a9c96c72ed.tar.gz |
gdb/
* solib-svr4.c (enable_break <target_auxv_search>): New variable
addr_bit. Adjust LOAD_ADDR sign for cross-arch inferiors.
Diffstat (limited to 'gdb/solib-svr4.c')
-rw-r--r-- | gdb/solib-svr4.c | 27 |
1 files changed, 26 insertions, 1 deletions
diff --git a/gdb/solib-svr4.c b/gdb/solib-svr4.c
index f8e8e846318..b3b9e00bde1 100644
--- a/gdb/solib-svr4.c
+++ b/gdb/solib-svr4.c
@@ -1451,7 +1451,32 @@ enable_break (struct svr4_info *info, int from_tty)
      from our so_list, then try using the AT_BASE auxilliary entry.  */
   if (!load_addr_found)
     if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
-      load_addr_found = 1;
+      {
+	int addr_bit = gdbarch_addr_bit (target_gdbarch);
+
+	/* Ensure LOAD_ADDR has proper sign in its possible upper bits so
+	   that `+ load_addr' will overflow CORE_ADDR width not creating
+	   invalid addresses like 0x101234567 for 32bit inferiors on 64bit
+	   GDB.  */
+
+	if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
+	  {
+	    CORE_ADDR space_size = (ULONGEST) 1 << addr_bit;
+	    CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
+							  tmp_bfd_target);
+
+	    gdb_assert (load_addr < space_size);
+
+	    /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
+	       64bit ld.so with 32bit executable, it should not happen.  */
+
+	    if (tmp_entry_point < space_size
+		&& tmp_entry_point + load_addr >= space_size)
+	      load_addr -= space_size;
+	  }
+
+	load_addr_found = 1;
+      }

   /* Otherwise we find the dynamic linker's base address by examining
      the current pc (which should point at the entry point for the