path: root/bfd/elf32-arm.c
author     Roland McGrath <roland@gnu.org>    2013-04-24 16:29:54 +0000
committer  Roland McGrath <roland@gnu.org>    2013-04-24 16:29:54 +0000
commit     5e70500ef81d076091fc3ce059269df2d1ba7759 (patch)
tree       115b278c61c14a2484be715caeff1060bd018e80 /bfd/elf32-arm.c
parent     e8c96a5b8dbfec194549c9b881dc5fa5edbc0046 (diff)
download   binutils-redhat-5e70500ef81d076091fc3ce059269df2d1ba7759.tar.gz
bfd/
	* elf32-arm.c (elf32_arm_allocate_plt_entry): If HTAB->nacl_p,
	allocate space for PLT header even if IS_IPLT_ENTRY.
	(arm_nacl_put_plt0): New function, broken out of ...
	(elf32_arm_finish_dynamic_sections): ... here.  Call it.
	If HTAB->nacl_p, set up the PLT header in .iplt too.
	(elf32_arm_output_arch_local_syms): If HTAB->nacl_p, write a
	mapping symbol for the start of .iplt too.
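Note: the hunks reproduced below are whitespace reindentation only; the functional
changes the ChangeLog describes are not visible in this excerpt.  As a rough,
hypothetical sketch of the shape of that change (the signature of
arm_nacl_put_plt0, the hash-table fields and the displacement arguments are
assumptions for illustration, not code taken from this commit):

    /* Hypothetical sketch only: signature and arguments are assumed, not
       copied from the commit, whose functional hunks are not shown here.  */
    static void
    arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
                       asection *plt, bfd_vma got_displacement)
    {
      /* Write the fixed NaCl PLT header instructions at the start of PLT,
         using GOT_DISPLACEMENT to reach the GOT from the header.  */
    }

    /* In elf32_arm_finish_dynamic_sections, the header setup formerly written
       inline becomes a call, repeated for .iplt when HTAB->nacl_p is set.  */
    if (htab->nacl_p)
      {
        arm_nacl_put_plt0 (htab, output_bfd, htab->root.splt, got_displacement);
        if (htab->root.iplt != NULL && htab->root.iplt->size > 0)
          arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt,
                             iplt_got_displacement);
      }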
Diffstat (limited to 'bfd/elf32-arm.c')
-rw-r--r--   bfd/elf32-arm.c   2953
1 file changed, 1493 insertions(+), 1460 deletions(-)
diff --git a/bfd/elf32-arm.c b/bfd/elf32-arm.c
index 9fff630d6b..6201e60735 100644
--- a/bfd/elf32-arm.c
+++ b/bfd/elf32-arm.c
@@ -1,6 +1,6 @@
/* 32-bit ELF support for ARM
Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
- 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+ 2008, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
This file is part of BFD, the Binary File Descriptor library.
@@ -327,160 +327,160 @@ static reloc_howto_type elf32_arm_howto_table_1[] =
/* Dynamic TLS relocations. */
HOWTO (R_ARM_TLS_DTPMOD32, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_TLS_DTPMOD32", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_TLS_DTPMOD32", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_TLS_DTPOFF32, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_TLS_DTPOFF32", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_TLS_DTPOFF32", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_TLS_TPOFF32, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_TLS_TPOFF32", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_TLS_TPOFF32", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
/* Relocs used in ARM Linux */
HOWTO (R_ARM_COPY, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_COPY", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_COPY", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_GLOB_DAT, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_GLOB_DAT", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_GLOB_DAT", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_JUMP_SLOT, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_JUMP_SLOT", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_JUMP_SLOT", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_RELATIVE, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_RELATIVE", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_RELATIVE", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_GOTOFF32, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_GOTOFF32", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_GOTOFF32", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_GOTPC, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- TRUE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_GOTPC", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- TRUE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_GOTPC", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ TRUE), /* pcrel_offset */
HOWTO (R_ARM_GOT32, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_GOT32", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_GOT32", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_PLT32, /* type */
- 2, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 24, /* bitsize */
- TRUE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_PLT32", /* name */
- FALSE, /* partial_inplace */
- 0x00ffffff, /* src_mask */
- 0x00ffffff, /* dst_mask */
- TRUE), /* pcrel_offset */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 24, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_PLT32", /* name */
+ FALSE, /* partial_inplace */
+ 0x00ffffff, /* src_mask */
+ 0x00ffffff, /* dst_mask */
+ TRUE), /* pcrel_offset */
HOWTO (R_ARM_CALL, /* type */
2, /* rightshift */
@@ -1487,33 +1487,33 @@ static reloc_howto_type elf32_arm_howto_table_1[] =
/* GNU extension to record C++ vtable member usage */
HOWTO (R_ARM_GNU_VTENTRY, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 0, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_dont, /* complain_on_overflow */
- _bfd_elf_rel_vtable_reloc_fn, /* special_function */
- "R_ARM_GNU_VTENTRY", /* name */
- FALSE, /* partial_inplace */
- 0, /* src_mask */
- 0, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ _bfd_elf_rel_vtable_reloc_fn, /* special_function */
+ "R_ARM_GNU_VTENTRY", /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ FALSE), /* pcrel_offset */
/* GNU extension to record C++ vtable hierarchy */
HOWTO (R_ARM_GNU_VTINHERIT, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 0, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_dont, /* complain_on_overflow */
- NULL, /* special_function */
- "R_ARM_GNU_VTINHERIT", /* name */
- FALSE, /* partial_inplace */
- 0, /* src_mask */
- 0, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ NULL, /* special_function */
+ "R_ARM_GNU_VTINHERIT", /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_THM_JUMP11, /* type */
1, /* rightshift */
@@ -1545,74 +1545,74 @@ static reloc_howto_type elf32_arm_howto_table_1[] =
/* TLS relocations */
HOWTO (R_ARM_TLS_GD32, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- NULL, /* special_function */
- "R_ARM_TLS_GD32", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ NULL, /* special_function */
+ "R_ARM_TLS_GD32", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_TLS_LDM32, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_TLS_LDM32", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_TLS_LDM32", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_TLS_LDO32, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_TLS_LDO32", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_TLS_LDO32", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_TLS_IE32, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- NULL, /* special_function */
- "R_ARM_TLS_IE32", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ NULL, /* special_function */
+ "R_ARM_TLS_IE32", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_TLS_LE32, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_TLS_LE32", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE), /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_TLS_LE32", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
HOWTO (R_ARM_TLS_LDO12, /* type */
0, /* rightshift */
@@ -1696,18 +1696,18 @@ static reloc_howto_type elf32_arm_howto_table_1[] =
static reloc_howto_type elf32_arm_howto_table_2[1] =
{
HOWTO (R_ARM_IRELATIVE, /* type */
- 0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
- FALSE, /* pc_relative */
- 0, /* bitpos */
- complain_overflow_bitfield,/* complain_on_overflow */
- bfd_elf_generic_reloc, /* special_function */
- "R_ARM_IRELATIVE", /* name */
- TRUE, /* partial_inplace */
- 0xffffffff, /* src_mask */
- 0xffffffff, /* dst_mask */
- FALSE) /* pcrel_offset */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_IRELATIVE", /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE) /* pcrel_offset */
};
/* 249-255 extended, currently unused, relocations: */
@@ -2098,7 +2098,7 @@ static const unsigned long dl_tlsdesc_lazy_trampoline [] =
0xe081100f, /* 2: add r1, pc */
0xe12fff12, /* bx r2 */
0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
- + dl_tlsdesc_lazy_resolver(GOT) */
+ + dl_tlsdesc_lazy_resolver(GOT) */
0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
@@ -3004,8 +3004,8 @@ struct elf32_arm_link_hash_table
static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
- struct bfd_hash_table * table,
- const char * string)
+ struct bfd_hash_table * table,
+ const char * string)
{
struct elf32_arm_link_hash_entry * ret =
(struct elf32_arm_link_hash_entry *) entry;
@@ -3014,7 +3014,7 @@ elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
subclass. */
if (ret == NULL)
ret = (struct elf32_arm_link_hash_entry *)
- bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
+ bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
if (ret == NULL)
return (struct bfd_hash_entry *) ret;
@@ -3188,7 +3188,7 @@ stub_hash_newfunc (struct bfd_hash_entry *entry,
if (entry == NULL)
{
entry = (struct bfd_hash_entry *)
- bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
+ bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
if (entry == NULL)
return entry;
}
@@ -3625,11 +3625,11 @@ arm_type_of_stub (struct bfd_link_info *info,
{
/* Handle cases where:
- this call goes too far (different Thumb/Thumb2 max
- distance)
+ distance)
- it's a Thumb->Arm call and blx is not available, or it's a
- Thumb->Arm branch (not bl). A stub is needed in this case,
- but only if this call is not through a PLT entry. Indeed,
- PLT stubs handle mode switching already.
+ Thumb->Arm branch (not bl). A stub is needed in this case,
+ but only if this call is not through a PLT entry. Indeed,
+ PLT stubs handle mode switching already.
*/
if ((!thumb2
&& (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
@@ -4108,11 +4108,11 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
bfd_vma data = (bfd_vma) template_sequence[i].data;
if (template_sequence[i].reloc_addend != 0)
{
- /* We've borrowed the reloc_addend field to mean we should
- insert a condition code into this (Thumb-1 branch)
- instruction. See THUMB16_BCOND_INSN. */
- BFD_ASSERT ((data & 0xff00) == 0xd000);
- data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
+ /* We've borrowed the reloc_addend field to mean we should
+ insert a condition code into this (Thumb-1 branch)
+ instruction. See THUMB16_BCOND_INSN. */
+ BFD_ASSERT ((data & 0xff00) == 0xd000);
+ data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
}
bfd_put_16 (stub_bfd, data, loc + size);
size += 2;
@@ -4125,13 +4125,13 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
loc + size);
bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
loc + size + 2);
- if (template_sequence[i].r_type != R_ARM_NONE)
- {
- stub_reloc_idx[nrelocs] = i;
- stub_reloc_offset[nrelocs++] = size;
- }
- size += 4;
- break;
+ if (template_sequence[i].r_type != R_ARM_NONE)
+ {
+ stub_reloc_idx[nrelocs] = i;
+ stub_reloc_offset[nrelocs++] = size;
+ }
+ size += 4;
+ break;
case ARM_TYPE:
bfd_put_32 (stub_bfd, template_sequence[i].data,
@@ -4189,7 +4189,7 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
rel.r_info = ELF32_R_INFO (0,
- template_sequence[stub_reloc_idx[i]].r_type);
+ template_sequence[stub_reloc_idx[i]].r_type);
rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
@@ -4222,7 +4222,7 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
rel.r_info = ELF32_R_INFO (0,
- template_sequence[stub_reloc_idx[i]].r_type);
+ template_sequence[stub_reloc_idx[i]].r_type);
rel.r_addend = 0;
elf32_arm_final_link_relocate (elf32_arm_howto_from_type
@@ -4452,15 +4452,15 @@ group_sections (struct elf32_arm_link_hash_table *htab,
#define NEXT_SEC PREV_SEC
head = NULL;
while (tail != NULL)
- {
- /* Pop from tail. */
- asection *item = tail;
- tail = PREV_SEC (item);
+ {
+ /* Pop from tail. */
+ asection *item = tail;
+ tail = PREV_SEC (item);
- /* Push on head. */
- NEXT_SEC (item) = head;
- head = item;
- }
+ /* Push on head. */
+ NEXT_SEC (item) = head;
+ head = item;
+ }
while (head != NULL)
{
@@ -4579,65 +4579,65 @@ cortex_a8_erratum_scan (bfd *input_bfd,
bfd_vma base_vma;
if (elf_section_type (section) != SHT_PROGBITS
- || (elf_section_flags (section) & SHF_EXECINSTR) == 0
- || (section->flags & SEC_EXCLUDE) != 0
- || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
- || (section->output_section == bfd_abs_section_ptr))
- continue;
+ || (elf_section_flags (section) & SHF_EXECINSTR) == 0
+ || (section->flags & SEC_EXCLUDE) != 0
+ || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
+ || (section->output_section == bfd_abs_section_ptr))
+ continue;
base_vma = section->output_section->vma + section->output_offset;
if (elf_section_data (section)->this_hdr.contents != NULL)
- contents = elf_section_data (section)->this_hdr.contents;
+ contents = elf_section_data (section)->this_hdr.contents;
else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
- return TRUE;
+ return TRUE;
sec_data = elf32_arm_section_data (section);
for (span = 0; span < sec_data->mapcount; span++)
- {
- unsigned int span_start = sec_data->map[span].vma;
- unsigned int span_end = (span == sec_data->mapcount - 1)
- ? section->size : sec_data->map[span + 1].vma;
- unsigned int i;
- char span_type = sec_data->map[span].type;
- bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
-
- if (span_type != 't')
- continue;
-
- /* Span is entirely within a single 4KB region: skip scanning. */
- if (((base_vma + span_start) & ~0xfff)
+ {
+ unsigned int span_start = sec_data->map[span].vma;
+ unsigned int span_end = (span == sec_data->mapcount - 1)
+ ? section->size : sec_data->map[span + 1].vma;
+ unsigned int i;
+ char span_type = sec_data->map[span].type;
+ bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
+
+ if (span_type != 't')
+ continue;
+
+ /* Span is entirely within a single 4KB region: skip scanning. */
+ if (((base_vma + span_start) & ~0xfff)
== ((base_vma + span_end) & ~0xfff))
- continue;
-
- /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
-
- * The opcode is BLX.W, BL.W, B.W, Bcc.W
- * The branch target is in the same 4KB region as the
- first half of the branch.
- * The instruction before the branch is a 32-bit
- length non-branch instruction. */
- for (i = span_start; i < span_end;)
- {
- unsigned int insn = bfd_getl16 (&contents[i]);
- bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
+ continue;
+
+ /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
+
+ * The opcode is BLX.W, BL.W, B.W, Bcc.W
+ * The branch target is in the same 4KB region as the
+ first half of the branch.
+ * The instruction before the branch is a 32-bit
+ length non-branch instruction. */
+ for (i = span_start; i < span_end;)
+ {
+ unsigned int insn = bfd_getl16 (&contents[i]);
+ bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
- if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
- insn_32bit = TRUE;
+ if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
+ insn_32bit = TRUE;
if (insn_32bit)
- {
- /* Load the rest of the insn (in manual-friendly order). */
- insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
-
- /* Encoding T4: B<c>.W. */
- is_b = (insn & 0xf800d000) == 0xf0009000;
- /* Encoding T1: BL<c>.W. */
- is_bl = (insn & 0xf800d000) == 0xf000d000;
- /* Encoding T2: BLX<c>.W. */
- is_blx = (insn & 0xf800d000) == 0xf000c000;
+ {
+ /* Load the rest of the insn (in manual-friendly order). */
+ insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
+
+ /* Encoding T4: B<c>.W. */
+ is_b = (insn & 0xf800d000) == 0xf0009000;
+ /* Encoding T1: BL<c>.W. */
+ is_bl = (insn & 0xf800d000) == 0xf000d000;
+ /* Encoding T2: BLX<c>.W. */
+ is_blx = (insn & 0xf800d000) == 0xf000c000;
/* Encoding T3: B<c>.W (not permitted in IT block). */
is_bcc = (insn & 0xf800d000) == 0xf0008000
&& (insn & 0x07f00000) != 0x03800000;
@@ -4645,25 +4645,25 @@ cortex_a8_erratum_scan (bfd *input_bfd,
is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
- if (((base_vma + i) & 0xfff) == 0xffe
+ if (((base_vma + i) & 0xfff) == 0xffe
&& insn_32bit
&& is_32bit_branch
&& last_was_32bit
&& ! last_was_branch)
- {
- bfd_signed_vma offset = 0;
- bfd_boolean force_target_arm = FALSE;
+ {
+ bfd_signed_vma offset = 0;
+ bfd_boolean force_target_arm = FALSE;
bfd_boolean force_target_thumb = FALSE;
- bfd_vma target;
- enum elf32_arm_stub_type stub_type = arm_stub_none;
- struct a8_erratum_reloc key, *found;
- bfd_boolean use_plt = FALSE;
+ bfd_vma target;
+ enum elf32_arm_stub_type stub_type = arm_stub_none;
+ struct a8_erratum_reloc key, *found;
+ bfd_boolean use_plt = FALSE;
- key.from = base_vma + i;
- found = (struct a8_erratum_reloc *)
- bsearch (&key, a8_relocs, num_a8_relocs,
- sizeof (struct a8_erratum_reloc),
- &a8_reloc_compare);
+ key.from = base_vma + i;
+ found = (struct a8_erratum_reloc *)
+ bsearch (&key, a8_relocs, num_a8_relocs,
+ sizeof (struct a8_erratum_reloc),
+ &a8_reloc_compare);
if (found)
{
@@ -4671,7 +4671,7 @@ cortex_a8_erratum_scan (bfd *input_bfd,
struct elf_link_hash_entry *entry;
/* We don't care about the error returned from this
- function, only if there is glue or not. */
+ function, only if there is glue or not. */
entry = find_thumb_glue (info, found->sym_name,
&error_message);
@@ -4693,7 +4693,7 @@ cortex_a8_erratum_scan (bfd *input_bfd,
}
}
- /* Check if we have an offending branch instruction. */
+ /* Check if we have an offending branch instruction. */
if (found && found->non_a8_stub)
/* We've already made a stub for this instruction, e.g.
@@ -4701,46 +4701,46 @@ cortex_a8_erratum_scan (bfd *input_bfd,
stub will suffice to work around the A8 erratum (see
setting of always_after_branch above). */
;
- else if (is_bcc)
- {
- offset = (insn & 0x7ff) << 1;
- offset |= (insn & 0x3f0000) >> 4;
- offset |= (insn & 0x2000) ? 0x40000 : 0;
- offset |= (insn & 0x800) ? 0x80000 : 0;
- offset |= (insn & 0x4000000) ? 0x100000 : 0;
- if (offset & 0x100000)
- offset |= ~ ((bfd_signed_vma) 0xfffff);
- stub_type = arm_stub_a8_veneer_b_cond;
- }
- else if (is_b || is_bl || is_blx)
- {
- int s = (insn & 0x4000000) != 0;
- int j1 = (insn & 0x2000) != 0;
- int j2 = (insn & 0x800) != 0;
- int i1 = !(j1 ^ s);
- int i2 = !(j2 ^ s);
-
- offset = (insn & 0x7ff) << 1;
- offset |= (insn & 0x3ff0000) >> 4;
- offset |= i2 << 22;
- offset |= i1 << 23;
- offset |= s << 24;
- if (offset & 0x1000000)
- offset |= ~ ((bfd_signed_vma) 0xffffff);
-
- if (is_blx)
- offset &= ~ ((bfd_signed_vma) 3);
-
- stub_type = is_blx ? arm_stub_a8_veneer_blx :
- is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
- }
-
- if (stub_type != arm_stub_none)
- {
- bfd_vma pc_for_insn = base_vma + i + 4;
+ else if (is_bcc)
+ {
+ offset = (insn & 0x7ff) << 1;
+ offset |= (insn & 0x3f0000) >> 4;
+ offset |= (insn & 0x2000) ? 0x40000 : 0;
+ offset |= (insn & 0x800) ? 0x80000 : 0;
+ offset |= (insn & 0x4000000) ? 0x100000 : 0;
+ if (offset & 0x100000)
+ offset |= ~ ((bfd_signed_vma) 0xfffff);
+ stub_type = arm_stub_a8_veneer_b_cond;
+ }
+ else if (is_b || is_bl || is_blx)
+ {
+ int s = (insn & 0x4000000) != 0;
+ int j1 = (insn & 0x2000) != 0;
+ int j2 = (insn & 0x800) != 0;
+ int i1 = !(j1 ^ s);
+ int i2 = !(j2 ^ s);
+
+ offset = (insn & 0x7ff) << 1;
+ offset |= (insn & 0x3ff0000) >> 4;
+ offset |= i2 << 22;
+ offset |= i1 << 23;
+ offset |= s << 24;
+ if (offset & 0x1000000)
+ offset |= ~ ((bfd_signed_vma) 0xffffff);
+
+ if (is_blx)
+ offset &= ~ ((bfd_signed_vma) 3);
+
+ stub_type = is_blx ? arm_stub_a8_veneer_blx :
+ is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
+ }
+
+ if (stub_type != arm_stub_none)
+ {
+ bfd_vma pc_for_insn = base_vma + i + 4;
/* The original instruction is a BL, but the target is
- an ARM instruction. If we were not making a stub,
+ an ARM instruction. If we were not making a stub,
the BL would have been converted to a BLX. Use the
BLX stub instead in that case. */
if (htab->use_blx && force_target_arm
@@ -4761,43 +4761,43 @@ cortex_a8_erratum_scan (bfd *input_bfd,
is_bl = TRUE;
}
- if (is_blx)
- pc_for_insn &= ~ ((bfd_vma) 3);
+ if (is_blx)
+ pc_for_insn &= ~ ((bfd_vma) 3);
- /* If we found a relocation, use the proper destination,
- not the offset in the (unrelocated) instruction.
+ /* If we found a relocation, use the proper destination,
+ not the offset in the (unrelocated) instruction.
Note this is always done if we switched the stub type
above. */
- if (found)
- offset =
+ if (found)
+ offset =
(bfd_signed_vma) (found->destination - pc_for_insn);
- /* If the stub will use a Thumb-mode branch to a
- PLT target, redirect it to the preceding Thumb
- entry point. */
- if (stub_type != arm_stub_a8_veneer_blx && use_plt)
- offset -= PLT_THUMB_STUB_SIZE;
+ /* If the stub will use a Thumb-mode branch to a
+ PLT target, redirect it to the preceding Thumb
+ entry point. */
+ if (stub_type != arm_stub_a8_veneer_blx && use_plt)
+ offset -= PLT_THUMB_STUB_SIZE;
- target = pc_for_insn + offset;
+ target = pc_for_insn + offset;
- /* The BLX stub is ARM-mode code. Adjust the offset to
- take the different PC value (+8 instead of +4) into
+ /* The BLX stub is ARM-mode code. Adjust the offset to
+ take the different PC value (+8 instead of +4) into
account. */
- if (stub_type == arm_stub_a8_veneer_blx)
- offset += 4;
-
- if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
- {
- char *stub_name = NULL;
-
- if (num_a8_fixes == a8_fix_table_size)
- {
- a8_fix_table_size *= 2;
- a8_fixes = (struct a8_erratum_fix *)
- bfd_realloc (a8_fixes,
- sizeof (struct a8_erratum_fix)
- * a8_fix_table_size);
- }
+ if (stub_type == arm_stub_a8_veneer_blx)
+ offset += 4;
+
+ if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
+ {
+ char *stub_name = NULL;
+
+ if (num_a8_fixes == a8_fix_table_size)
+ {
+ a8_fix_table_size *= 2;
+ a8_fixes = (struct a8_erratum_fix *)
+ bfd_realloc (a8_fixes,
+ sizeof (struct a8_erratum_fix)
+ * a8_fix_table_size);
+ }
if (num_a8_fixes < prev_num_a8_fixes)
{
@@ -4822,29 +4822,29 @@ cortex_a8_erratum_scan (bfd *input_bfd,
sprintf (stub_name, "%x:%x", section->id, i);
}
- a8_fixes[num_a8_fixes].input_bfd = input_bfd;
- a8_fixes[num_a8_fixes].section = section;
- a8_fixes[num_a8_fixes].offset = i;
- a8_fixes[num_a8_fixes].addend = offset;
- a8_fixes[num_a8_fixes].orig_insn = insn;
- a8_fixes[num_a8_fixes].stub_name = stub_name;
- a8_fixes[num_a8_fixes].stub_type = stub_type;
- a8_fixes[num_a8_fixes].branch_type =
+ a8_fixes[num_a8_fixes].input_bfd = input_bfd;
+ a8_fixes[num_a8_fixes].section = section;
+ a8_fixes[num_a8_fixes].offset = i;
+ a8_fixes[num_a8_fixes].addend = offset;
+ a8_fixes[num_a8_fixes].orig_insn = insn;
+ a8_fixes[num_a8_fixes].stub_name = stub_name;
+ a8_fixes[num_a8_fixes].stub_type = stub_type;
+ a8_fixes[num_a8_fixes].branch_type =
is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
- num_a8_fixes++;
- }
- }
- }
+ num_a8_fixes++;
+ }
+ }
+ }
- i += insn_32bit ? 4 : 2;
- last_was_32bit = insn_32bit;
+ i += insn_32bit ? 4 : 2;
+ last_was_32bit = insn_32bit;
last_was_branch = is_32bit_branch;
- }
- }
+ }
+ }
if (elf_section_data (section)->this_hdr.contents == NULL)
- free (contents);
+ free (contents);
}
*a8_fixes_p = a8_fixes;
@@ -4882,9 +4882,9 @@ elf32_arm_size_stubs (bfd *output_bfd,
if (htab->fix_cortex_a8)
{
a8_fixes = (struct a8_erratum_fix *)
- bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
+ bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
a8_relocs = (struct a8_erratum_reloc *)
- bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
+ bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
}
/* Propagate mach to stub bfd, because it may not have been
@@ -4952,8 +4952,8 @@ elf32_arm_size_stubs (bfd *output_bfd,
asection *section;
Elf_Internal_Sym *local_syms = NULL;
- if (!is_arm_elf (input_bfd))
- continue;
+ if (!is_arm_elf (input_bfd))
+ continue;
num_a8_relocs = 0;
@@ -5226,99 +5226,99 @@ elf32_arm_size_stubs (bfd *output_bfd,
goto error_ret_free_internal;
}
- stub_entry->target_value = sym_value;
- stub_entry->target_section = sym_sec;
- stub_entry->stub_type = stub_type;
- stub_entry->h = hash;
- stub_entry->branch_type = branch_type;
-
- if (sym_name == NULL)
- sym_name = "unnamed";
- stub_entry->output_name = (char *)
- bfd_alloc (htab->stub_bfd,
- sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
- + strlen (sym_name));
- if (stub_entry->output_name == NULL)
- {
- free (stub_name);
- goto error_ret_free_internal;
- }
-
- /* For historical reasons, use the existing names for
- ARM-to-Thumb and Thumb-to-ARM stubs. */
- if ((r_type == (unsigned int) R_ARM_THM_CALL
+ stub_entry->target_value = sym_value;
+ stub_entry->target_section = sym_sec;
+ stub_entry->stub_type = stub_type;
+ stub_entry->h = hash;
+ stub_entry->branch_type = branch_type;
+
+ if (sym_name == NULL)
+ sym_name = "unnamed";
+ stub_entry->output_name = (char *)
+ bfd_alloc (htab->stub_bfd,
+ sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
+ + strlen (sym_name));
+ if (stub_entry->output_name == NULL)
+ {
+ free (stub_name);
+ goto error_ret_free_internal;
+ }
+
+ /* For historical reasons, use the existing names for
+ ARM-to-Thumb and Thumb-to-ARM stubs. */
+ if ((r_type == (unsigned int) R_ARM_THM_CALL
|| r_type == (unsigned int) R_ARM_THM_JUMP24)
&& branch_type == ST_BRANCH_TO_ARM)
- sprintf (stub_entry->output_name,
- THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
- else if ((r_type == (unsigned int) R_ARM_CALL
+ sprintf (stub_entry->output_name,
+ THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
+ else if ((r_type == (unsigned int) R_ARM_CALL
|| r_type == (unsigned int) R_ARM_JUMP24)
&& branch_type == ST_BRANCH_TO_THUMB)
- sprintf (stub_entry->output_name,
- ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
- else
- sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
- sym_name);
-
- stub_changed = TRUE;
- }
- while (0);
-
- /* Look for relocations which might trigger Cortex-A8
- erratum. */
- if (htab->fix_cortex_a8
- && (r_type == (unsigned int) R_ARM_THM_JUMP24
- || r_type == (unsigned int) R_ARM_THM_JUMP19
- || r_type == (unsigned int) R_ARM_THM_CALL
- || r_type == (unsigned int) R_ARM_THM_XPC22))
- {
- bfd_vma from = section->output_section->vma
- + section->output_offset
- + irela->r_offset;
-
- if ((from & 0xfff) == 0xffe)
- {
- /* Found a candidate. Note we haven't checked the
- destination is within 4K here: if we do so (and
- don't create an entry in a8_relocs) we can't tell
- that a branch should have been relocated when
- scanning later. */
- if (num_a8_relocs == a8_reloc_table_size)
- {
- a8_reloc_table_size *= 2;
- a8_relocs = (struct a8_erratum_reloc *)
- bfd_realloc (a8_relocs,
- sizeof (struct a8_erratum_reloc)
- * a8_reloc_table_size);
- }
-
- a8_relocs[num_a8_relocs].from = from;
- a8_relocs[num_a8_relocs].destination = destination;
- a8_relocs[num_a8_relocs].r_type = r_type;
- a8_relocs[num_a8_relocs].branch_type = branch_type;
- a8_relocs[num_a8_relocs].sym_name = sym_name;
- a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
- a8_relocs[num_a8_relocs].hash = hash;
-
- num_a8_relocs++;
- }
- }
+ sprintf (stub_entry->output_name,
+ ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
+ else
+ sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
+ sym_name);
+
+ stub_changed = TRUE;
+ }
+ while (0);
+
+ /* Look for relocations which might trigger Cortex-A8
+ erratum. */
+ if (htab->fix_cortex_a8
+ && (r_type == (unsigned int) R_ARM_THM_JUMP24
+ || r_type == (unsigned int) R_ARM_THM_JUMP19
+ || r_type == (unsigned int) R_ARM_THM_CALL
+ || r_type == (unsigned int) R_ARM_THM_XPC22))
+ {
+ bfd_vma from = section->output_section->vma
+ + section->output_offset
+ + irela->r_offset;
+
+ if ((from & 0xfff) == 0xffe)
+ {
+ /* Found a candidate. Note we haven't checked the
+ destination is within 4K here: if we do so (and
+ don't create an entry in a8_relocs) we can't tell
+ that a branch should have been relocated when
+ scanning later. */
+ if (num_a8_relocs == a8_reloc_table_size)
+ {
+ a8_reloc_table_size *= 2;
+ a8_relocs = (struct a8_erratum_reloc *)
+ bfd_realloc (a8_relocs,
+ sizeof (struct a8_erratum_reloc)
+ * a8_reloc_table_size);
+ }
+
+ a8_relocs[num_a8_relocs].from = from;
+ a8_relocs[num_a8_relocs].destination = destination;
+ a8_relocs[num_a8_relocs].r_type = r_type;
+ a8_relocs[num_a8_relocs].branch_type = branch_type;
+ a8_relocs[num_a8_relocs].sym_name = sym_name;
+ a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
+ a8_relocs[num_a8_relocs].hash = hash;
+
+ num_a8_relocs++;
+ }
+ }
}
- /* We're done with the internal relocs, free them. */
- if (elf_section_data (section)->relocs == NULL)
- free (internal_relocs);
- }
+ /* We're done with the internal relocs, free them. */
+ if (elf_section_data (section)->relocs == NULL)
+ free (internal_relocs);
+ }
- if (htab->fix_cortex_a8)
+ if (htab->fix_cortex_a8)
{
- /* Sort relocs which might apply to Cortex-A8 erratum. */
- qsort (a8_relocs, num_a8_relocs,
+ /* Sort relocs which might apply to Cortex-A8 erratum. */
+ qsort (a8_relocs, num_a8_relocs,
sizeof (struct a8_erratum_reloc),
- &a8_reloc_compare);
+ &a8_reloc_compare);
- /* Scan for branches which might trigger Cortex-A8 erratum. */
- if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
+ /* Scan for branches which might trigger Cortex-A8 erratum. */
+ if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
&num_a8_fixes, &a8_fix_table_size,
a8_relocs, num_a8_relocs,
prev_num_a8_fixes, &stub_changed)
@@ -5328,7 +5328,7 @@ elf32_arm_size_stubs (bfd *output_bfd,
}
if (prev_num_a8_fixes != num_a8_fixes)
- stub_changed = TRUE;
+ stub_changed = TRUE;
if (!stub_changed)
break;
@@ -5350,18 +5350,18 @@ elf32_arm_size_stubs (bfd *output_bfd,
/* Add Cortex-A8 erratum veneers to stub section sizes too. */
if (htab->fix_cortex_a8)
- for (i = 0; i < num_a8_fixes; i++)
- {
+ for (i = 0; i < num_a8_fixes; i++)
+ {
stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
a8_fixes[i].section, htab);
if (stub_sec == NULL)
goto error_ret_free_local;
- stub_sec->size
- += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
- NULL);
- }
+ stub_sec->size
+ += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
+ NULL);
+ }
/* Ask the linker to do its stuff. */
@@ -5372,47 +5372,47 @@ elf32_arm_size_stubs (bfd *output_bfd,
if (htab->fix_cortex_a8)
{
for (i = 0; i < num_a8_fixes; i++)
- {
- struct elf32_arm_stub_hash_entry *stub_entry;
- char *stub_name = a8_fixes[i].stub_name;
- asection *section = a8_fixes[i].section;
- unsigned int section_id = a8_fixes[i].section->id;
- asection *link_sec = htab->stub_group[section_id].link_sec;
- asection *stub_sec = htab->stub_group[section_id].stub_sec;
- const insn_sequence *template_sequence;
- int template_size, size = 0;
-
- stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
- TRUE, FALSE);
- if (stub_entry == NULL)
- {
- (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
- section->owner,
- stub_name);
- return FALSE;
- }
-
- stub_entry->stub_sec = stub_sec;
- stub_entry->stub_offset = 0;
- stub_entry->id_sec = link_sec;
- stub_entry->stub_type = a8_fixes[i].stub_type;
- stub_entry->target_section = a8_fixes[i].section;
- stub_entry->target_value = a8_fixes[i].offset;
- stub_entry->target_addend = a8_fixes[i].addend;
- stub_entry->orig_insn = a8_fixes[i].orig_insn;
+ {
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ char *stub_name = a8_fixes[i].stub_name;
+ asection *section = a8_fixes[i].section;
+ unsigned int section_id = a8_fixes[i].section->id;
+ asection *link_sec = htab->stub_group[section_id].link_sec;
+ asection *stub_sec = htab->stub_group[section_id].stub_sec;
+ const insn_sequence *template_sequence;
+ int template_size, size = 0;
+
+ stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
+ TRUE, FALSE);
+ if (stub_entry == NULL)
+ {
+ (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
+ section->owner,
+ stub_name);
+ return FALSE;
+ }
+
+ stub_entry->stub_sec = stub_sec;
+ stub_entry->stub_offset = 0;
+ stub_entry->id_sec = link_sec;
+ stub_entry->stub_type = a8_fixes[i].stub_type;
+ stub_entry->target_section = a8_fixes[i].section;
+ stub_entry->target_value = a8_fixes[i].offset;
+ stub_entry->target_addend = a8_fixes[i].addend;
+ stub_entry->orig_insn = a8_fixes[i].orig_insn;
stub_entry->branch_type = a8_fixes[i].branch_type;
- size = find_stub_size_and_template (a8_fixes[i].stub_type,
- &template_sequence,
- &template_size);
+ size = find_stub_size_and_template (a8_fixes[i].stub_type,
+ &template_sequence,
+ &template_size);
- stub_entry->stub_size = size;
- stub_entry->stub_template = template_sequence;
- stub_entry->stub_template_size = template_size;
- }
+ stub_entry->stub_size = size;
+ stub_entry->stub_template = template_sequence;
+ stub_entry->stub_template_size = template_size;
+ }
/* Stash the Cortex-A8 erratum fix array for use later in
- elf32_arm_write_section(). */
+ elf32_arm_write_section(). */
htab->a8_erratum_fixes = a8_fixes;
htab->num_a8_erratum_fixes = num_a8_fixes;
}
@@ -5492,7 +5492,7 @@ find_thumb_glue (struct bfd_link_info *link_info,
return NULL;
tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
- + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
+ + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
BFD_ASSERT (tmp_name);
@@ -5528,7 +5528,7 @@ find_arm_glue (struct bfd_link_info *link_info,
return NULL;
tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
- + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
+ + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
BFD_ASSERT (tmp_name);
@@ -5595,12 +5595,12 @@ static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
nop ldr r6, __func_addr
.arm mov lr, pc
b func bx r6
- .arm
- ;; back_to_thumb
- ldmia r13! {r6, lr}
- bx lr
- __func_addr:
- .word func */
+ .arm
+ ;; back_to_thumb
+ ldmia r13! {r6, lr}
+ bx lr
+ __func_addr:
+ .word func */
#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;
@@ -5697,7 +5697,7 @@ record_arm_to_thumb_glue (struct bfd_link_info * link_info,
BFD_ASSERT (s != NULL);
tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
- + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
+ + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
BFD_ASSERT (tmp_name);
@@ -5788,8 +5788,8 @@ record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
bh = NULL;
val = globals->bx_glue_size;
_bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
- tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
- NULL, TRUE, FALSE, &bh);
+ tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
+ NULL, TRUE, FALSE, &bh);
myh = (struct elf_link_hash_entry *) bh;
myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
@@ -5812,7 +5812,7 @@ elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
if (sec_data->map == NULL)
{
sec_data->map = (elf32_arm_section_map *)
- bfd_malloc (sizeof (elf32_arm_section_map));
+ bfd_malloc (sizeof (elf32_arm_section_map));
sec_data->mapcount = 0;
sec_data->mapsize = 1;
}
@@ -5823,8 +5823,8 @@ elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
{
sec_data->mapsize *= 2;
sec_data->map = (elf32_arm_section_map *)
- bfd_realloc_or_free (sec_data->map, sec_data->mapsize
- * sizeof (elf32_arm_section_map));
+ bfd_realloc_or_free (sec_data->map, sec_data->mapsize
+ * sizeof (elf32_arm_section_map));
}
if (sec_data->map)
@@ -5840,10 +5840,10 @@ elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
static bfd_vma
record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
- elf32_vfp11_erratum_list *branch,
- bfd *branch_bfd,
- asection *branch_sec,
- unsigned int offset)
+ elf32_vfp11_erratum_list *branch,
+ bfd *branch_bfd,
+ asection *branch_sec,
+ unsigned int offset)
{
asection *s;
struct elf32_arm_link_hash_table *hash_table;
@@ -5866,7 +5866,7 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
BFD_ASSERT (s != NULL);
tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
- (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
+ (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
BFD_ASSERT (tmp_name);
@@ -5881,8 +5881,8 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
bh = NULL;
val = hash_table->vfp11_erratum_glue_size;
_bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
- tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
- NULL, TRUE, FALSE, &bh);
+ tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
+ NULL, TRUE, FALSE, &bh);
myh = (struct elf_link_hash_entry *) bh;
myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
@@ -5929,20 +5929,20 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
{
bh = NULL;
/* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
- ever requires this erratum fix. */
+ ever requires this erratum fix. */
_bfd_generic_link_add_one_symbol (link_info,
hash_table->bfd_of_glue_owner, "$a",
BSF_LOCAL, s, 0, NULL,
- TRUE, FALSE, &bh);
+ TRUE, FALSE, &bh);
myh = (struct elf_link_hash_entry *) bh;
myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
myh->forced_local = 1;
/* The elf32_arm_init_maps function only cares about symbols from input
- BFDs. We must make a note of this generated mapping symbol
- ourselves so that code byteswapping works properly in
- elf32_arm_write_section. */
+ BFDs. We must make a note of this generated mapping symbol
+ ourselves so that code byteswapping works properly in
+ elf32_arm_write_section. */
elf32_arm_section_map_add (s, 'a', 0);
}
@@ -6175,8 +6175,8 @@ bfd_elf32_arm_process_before_allocation (bfd *abfd,
{
case R_ARM_PC24:
/* This one is a call from arm code. We need to look up
- the target of the call. If it is a thumb target, we
- insert glue. */
+ the target of the call. If it is a thumb target, we
+ insert glue. */
if (h->target_internal == ST_BRANCH_TO_THUMB)
record_arm_to_thumb_glue (link_info, h);
break;
@@ -6248,15 +6248,15 @@ bfd_elf32_arm_init_maps (bfd *abfd)
const char *name;
if (sec != NULL
- && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
- {
- name = bfd_elf_string_from_elf_section (abfd,
- hdr->sh_link, isym->st_name);
+ && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
+ {
+ name = bfd_elf_string_from_elf_section (abfd,
+ hdr->sh_link, isym->st_name);
- if (bfd_is_arm_special_symbol_name (name,
+ if (bfd_is_arm_special_symbol_name (name,
BFD_ARM_SPECIAL_SYM_TYPE_MAP))
- elf32_arm_section_map_add (sec, name[1], isym->st_value);
- }
+ elf32_arm_section_map_add (sec, name[1], isym->st_value);
+ }
}
}
@@ -6298,17 +6298,17 @@ bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
{
switch (globals->vfp11_fix)
- {
- case BFD_ARM_VFP11_FIX_DEFAULT:
- case BFD_ARM_VFP11_FIX_NONE:
- globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
- break;
-
- default:
- /* Give a warning, but do as the user requests anyway. */
- (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
- "workaround is not necessary for target architecture"), obfd);
- }
+ {
+ case BFD_ARM_VFP11_FIX_DEFAULT:
+ case BFD_ARM_VFP11_FIX_NONE:
+ globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
+ break;
+
+ default:
+ /* Give a warning, but do as the user requests anyway. */
+ (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
+ "workaround is not necessary for target architecture"), obfd);
+ }
}
else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
/* For earlier architectures, we might need the workaround, but do not
@@ -6340,7 +6340,7 @@ enum bfd_arm_vfp11_pipe
static unsigned int
bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
- unsigned int x)
+ unsigned int x)
{
if (is_double)
return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
@@ -6372,15 +6372,15 @@ bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
unsigned int reg = regs[i];
if (reg < 32 && (wmask & (1 << reg)) != 0)
- return TRUE;
+ return TRUE;
reg -= 32;
if (reg >= 16)
- continue;
+ continue;
if ((wmask & (3 << (reg * 2))) != 0)
- return TRUE;
+ return TRUE;
}
return FALSE;
@@ -6396,7 +6396,7 @@ bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
- int *numregs)
+ int *numregs)
{
enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
@@ -6408,96 +6408,96 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
pqrs = ((insn & 0x00800000) >> 20)
- | ((insn & 0x00300000) >> 19)
- | ((insn & 0x00000040) >> 6);
+ | ((insn & 0x00300000) >> 19)
+ | ((insn & 0x00000040) >> 6);
switch (pqrs)
- {
- case 0: /* fmac[sd]. */
- case 1: /* fnmac[sd]. */
- case 2: /* fmsc[sd]. */
- case 3: /* fnmsc[sd]. */
- vpipe = VFP11_FMAC;
- bfd_arm_vfp11_write_mask (destmask, fd);
- regs[0] = fd;
- regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
- regs[2] = fm;
- *numregs = 3;
- break;
-
- case 4: /* fmul[sd]. */
- case 5: /* fnmul[sd]. */
- case 6: /* fadd[sd]. */
- case 7: /* fsub[sd]. */
- vpipe = VFP11_FMAC;
- goto vfp_binop;
-
- case 8: /* fdiv[sd]. */
- vpipe = VFP11_DS;
- vfp_binop:
- bfd_arm_vfp11_write_mask (destmask, fd);
- regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
- regs[1] = fm;
- *numregs = 2;
- break;
-
- case 15: /* extended opcode. */
- {
- unsigned int extn = ((insn >> 15) & 0x1e)
- | ((insn >> 7) & 1);
-
- switch (extn)
- {
- case 0: /* fcpy[sd]. */
- case 1: /* fabs[sd]. */
- case 2: /* fneg[sd]. */
- case 8: /* fcmp[sd]. */
- case 9: /* fcmpe[sd]. */
- case 10: /* fcmpz[sd]. */
- case 11: /* fcmpez[sd]. */
- case 16: /* fuito[sd]. */
- case 17: /* fsito[sd]. */
- case 24: /* ftoui[sd]. */
- case 25: /* ftouiz[sd]. */
- case 26: /* ftosi[sd]. */
- case 27: /* ftosiz[sd]. */
- /* These instructions will not bounce due to underflow. */
- *numregs = 0;
- vpipe = VFP11_FMAC;
- break;
-
- case 3: /* fsqrt[sd]. */
- /* fsqrt cannot underflow, but it can (perhaps) overwrite
- registers to cause the erratum in previous instructions. */
- bfd_arm_vfp11_write_mask (destmask, fd);
- vpipe = VFP11_DS;
- break;
-
- case 15: /* fcvt{ds,sd}. */
- {
- int rnum = 0;
-
- bfd_arm_vfp11_write_mask (destmask, fd);
+ {
+ case 0: /* fmac[sd]. */
+ case 1: /* fnmac[sd]. */
+ case 2: /* fmsc[sd]. */
+ case 3: /* fnmsc[sd]. */
+ vpipe = VFP11_FMAC;
+ bfd_arm_vfp11_write_mask (destmask, fd);
+ regs[0] = fd;
+ regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
+ regs[2] = fm;
+ *numregs = 3;
+ break;
+
+ case 4: /* fmul[sd]. */
+ case 5: /* fnmul[sd]. */
+ case 6: /* fadd[sd]. */
+ case 7: /* fsub[sd]. */
+ vpipe = VFP11_FMAC;
+ goto vfp_binop;
+
+ case 8: /* fdiv[sd]. */
+ vpipe = VFP11_DS;
+ vfp_binop:
+ bfd_arm_vfp11_write_mask (destmask, fd);
+ regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
+ regs[1] = fm;
+ *numregs = 2;
+ break;
+
+ case 15: /* extended opcode. */
+ {
+ unsigned int extn = ((insn >> 15) & 0x1e)
+ | ((insn >> 7) & 1);
+
+ switch (extn)
+ {
+ case 0: /* fcpy[sd]. */
+ case 1: /* fabs[sd]. */
+ case 2: /* fneg[sd]. */
+ case 8: /* fcmp[sd]. */
+ case 9: /* fcmpe[sd]. */
+ case 10: /* fcmpz[sd]. */
+ case 11: /* fcmpez[sd]. */
+ case 16: /* fuito[sd]. */
+ case 17: /* fsito[sd]. */
+ case 24: /* ftoui[sd]. */
+ case 25: /* ftouiz[sd]. */
+ case 26: /* ftosi[sd]. */
+ case 27: /* ftosiz[sd]. */
+ /* These instructions will not bounce due to underflow. */
+ *numregs = 0;
+ vpipe = VFP11_FMAC;
+ break;
+
+ case 3: /* fsqrt[sd]. */
+ /* fsqrt cannot underflow, but it can (perhaps) overwrite
+ registers to cause the erratum in previous instructions. */
+ bfd_arm_vfp11_write_mask (destmask, fd);
+ vpipe = VFP11_DS;
+ break;
+
+ case 15: /* fcvt{ds,sd}. */
+ {
+ int rnum = 0;
+
+ bfd_arm_vfp11_write_mask (destmask, fd);
/* Only FCVTSD can underflow. */
- if ((insn & 0x100) != 0)
- regs[rnum++] = fm;
+ if ((insn & 0x100) != 0)
+ regs[rnum++] = fm;
- *numregs = rnum;
+ *numregs = rnum;
- vpipe = VFP11_FMAC;
- }
- break;
+ vpipe = VFP11_FMAC;
+ }
+ break;
- default:
- return VFP11_BAD;
- }
- }
- break;
+ default:
+ return VFP11_BAD;
+ }
+ }
+ break;
- default:
- return VFP11_BAD;
- }
+ default:
+ return VFP11_BAD;
+ }
}
/* Two-register transfer. */
else if ((insn & 0x0fe00ed0) == 0x0c400a10)
@@ -6506,13 +6506,13 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
if ((insn & 0x100000) == 0)
{
- if (is_double)
- bfd_arm_vfp11_write_mask (destmask, fm);
- else
- {
- bfd_arm_vfp11_write_mask (destmask, fm);
- bfd_arm_vfp11_write_mask (destmask, fm + 1);
- }
+ if (is_double)
+ bfd_arm_vfp11_write_mask (destmask, fm);
+ else
+ {
+ bfd_arm_vfp11_write_mask (destmask, fm);
+ bfd_arm_vfp11_write_mask (destmask, fm + 1);
+ }
}
vpipe = VFP11_LS;
@@ -6523,32 +6523,32 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
switch (puw)
- {
- case 0: /* Two-reg transfer. We should catch these above. */
- abort ();
+ {
+ case 0: /* Two-reg transfer. We should catch these above. */
+ abort ();
- case 2: /* fldm[sdx]. */
- case 3:
- case 5:
- {
- unsigned int i, offset = insn & 0xff;
+ case 2: /* fldm[sdx]. */
+ case 3:
+ case 5:
+ {
+ unsigned int i, offset = insn & 0xff;
- if (is_double)
- offset >>= 1;
+ if (is_double)
+ offset >>= 1;
- for (i = fd; i < fd + offset; i++)
- bfd_arm_vfp11_write_mask (destmask, i);
- }
- break;
+ for (i = fd; i < fd + offset; i++)
+ bfd_arm_vfp11_write_mask (destmask, i);
+ }
+ break;
- case 4: /* fld[sd]. */
- case 6:
- bfd_arm_vfp11_write_mask (destmask, fd);
- break;
+ case 4: /* fld[sd]. */
+ case 6:
+ bfd_arm_vfp11_write_mask (destmask, fd);
+ break;
- default:
- return VFP11_BAD;
- }
+ default:
+ return VFP11_BAD;
+ }
vpipe = VFP11_LS;
}
@@ -6559,18 +6559,18 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
switch (opcode)
- {
- case 0: /* fmsr/fmdlr. */
- case 1: /* fmdhr. */
- /* Mark fmdhr and fmdlr as writing to the whole of the DP
- destination register. I don't know if this is exactly right,
- but it is the conservative choice. */
- bfd_arm_vfp11_write_mask (destmask, fn);
- break;
-
- case 7: /* fmxr. */
- break;
- }
+ {
+ case 0: /* fmsr/fmdlr. */
+ case 1: /* fmdhr. */
+ /* Mark fmdhr and fmdlr as writing to the whole of the DP
+ destination register. I don't know if this is exactly right,
+ but it is the conservative choice. */
+ bfd_arm_vfp11_write_mask (destmask, fn);
+ break;
+
+ case 7: /* fmxr. */
+ break;
+ }
vpipe = VFP11_LS;
}
@@ -6604,23 +6604,23 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
The states transition as follows:
0 -> 1 (vector) or 0 -> 2 (scalar)
- A VFP FMAC-pipeline instruction has been seen. Fill
- regs[0]..regs[numregs-1] with its input operands. Remember this
- instruction in 'first_fmac'.
+ A VFP FMAC-pipeline instruction has been seen. Fill
+ regs[0]..regs[numregs-1] with its input operands. Remember this
+ instruction in 'first_fmac'.
1 -> 2
- Any instruction, except for a VFP instruction which overwrites
- regs[*].
+ Any instruction, except for a VFP instruction which overwrites
+ regs[*].
1 -> 3 [ -> 0 ] or
2 -> 3 [ -> 0 ]
- A VFP instruction has been seen which overwrites any of regs[*].
- We must make a veneer! Reset state to 0 before examining next
- instruction.
+ A VFP instruction has been seen which overwrites any of regs[*].
+ We must make a veneer! Reset state to 0 before examining next
+ instruction.
2 -> 0
- If we fail to match anything in state 2, reset to state 0 and reset
- the instruction pointer to the instruction after 'first_fmac'.
+ If we fail to match anything in state 2, reset to state 0 and reset
+ the instruction pointer to the instruction after 'first_fmac'.
If the VFP11 vector mode is in use, there must be at least two unrelated
instructions between anti-dependent VFP11 instructions to properly avoid
@@ -6651,19 +6651,19 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
struct _arm_elf_section_data *sec_data;
/* If we don't have executable progbits, we're not interested in this
- section. Also skip if section is to be excluded. */
+ section. Also skip if section is to be excluded. */
if (elf_section_type (sec) != SHT_PROGBITS
- || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
- || (sec->flags & SEC_EXCLUDE) != 0
+ || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
+ || (sec->flags & SEC_EXCLUDE) != 0
|| sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
|| sec->output_section == bfd_abs_section_ptr
- || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
- continue;
+ || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
+ continue;
sec_data = elf32_arm_section_data (sec);
if (sec_data->mapcount == 0)
- continue;
+ continue;
if (elf_section_data (sec)->this_hdr.contents != NULL)
contents = elf_section_data (sec)->this_hdr.contents;
@@ -6674,122 +6674,122 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
elf32_arm_compare_mapping);
for (span = 0; span < sec_data->mapcount; span++)
- {
- unsigned int span_start = sec_data->map[span].vma;
- unsigned int span_end = (span == sec_data->mapcount - 1)
+ {
+ unsigned int span_start = sec_data->map[span].vma;
+ unsigned int span_end = (span == sec_data->mapcount - 1)
? sec->size : sec_data->map[span + 1].vma;
- char span_type = sec_data->map[span].type;
-
- /* FIXME: Only ARM mode is supported at present. We may need to
- support Thumb-2 mode also at some point. */
- if (span_type != 'a')
- continue;
-
- for (i = span_start; i < span_end;)
- {
- unsigned int next_i = i + 4;
- unsigned int insn = bfd_big_endian (abfd)
- ? (contents[i] << 24)
- | (contents[i + 1] << 16)
- | (contents[i + 2] << 8)
- | contents[i + 3]
- : (contents[i + 3] << 24)
- | (contents[i + 2] << 16)
- | (contents[i + 1] << 8)
- | contents[i];
- unsigned int writemask = 0;
- enum bfd_arm_vfp11_pipe vpipe;
-
- switch (state)
- {
- case 0:
- vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
- &numregs);
- /* I'm assuming the VFP11 erratum can trigger with denorm
- operands on either the FMAC or the DS pipeline. This might
- lead to slightly overenthusiastic veneer insertion. */
- if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
- {
- state = use_vector ? 1 : 2;
- first_fmac = i;
- veneer_of_insn = insn;
- }
- break;
-
- case 1:
- {
- int other_regs[3], other_numregs;
- vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
+ char span_type = sec_data->map[span].type;
+
+ /* FIXME: Only ARM mode is supported at present. We may need to
+ support Thumb-2 mode also at some point. */
+ if (span_type != 'a')
+ continue;
+
+ for (i = span_start; i < span_end;)
+ {
+ unsigned int next_i = i + 4;
+ unsigned int insn = bfd_big_endian (abfd)
+ ? (contents[i] << 24)
+ | (contents[i + 1] << 16)
+ | (contents[i + 2] << 8)
+ | contents[i + 3]
+ : (contents[i + 3] << 24)
+ | (contents[i + 2] << 16)
+ | (contents[i + 1] << 8)
+ | contents[i];
+ unsigned int writemask = 0;
+ enum bfd_arm_vfp11_pipe vpipe;
+
+ switch (state)
+ {
+ case 0:
+ vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
+ &numregs);
+ /* I'm assuming the VFP11 erratum can trigger with denorm
+ operands on either the FMAC or the DS pipeline. This might
+ lead to slightly overenthusiastic veneer insertion. */
+ if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
+ {
+ state = use_vector ? 1 : 2;
+ first_fmac = i;
+ veneer_of_insn = insn;
+ }
+ break;
+
+ case 1:
+ {
+ int other_regs[3], other_numregs;
+ vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
other_regs,
- &other_numregs);
- if (vpipe != VFP11_BAD
- && bfd_arm_vfp11_antidependency (writemask, regs,
+ &other_numregs);
+ if (vpipe != VFP11_BAD
+ && bfd_arm_vfp11_antidependency (writemask, regs,
numregs))
- state = 3;
- else
- state = 2;
- }
- break;
-
- case 2:
- {
- int other_regs[3], other_numregs;
- vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
+ state = 3;
+ else
+ state = 2;
+ }
+ break;
+
+ case 2:
+ {
+ int other_regs[3], other_numregs;
+ vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
other_regs,
- &other_numregs);
- if (vpipe != VFP11_BAD
- && bfd_arm_vfp11_antidependency (writemask, regs,
+ &other_numregs);
+ if (vpipe != VFP11_BAD
+ && bfd_arm_vfp11_antidependency (writemask, regs,
numregs))
- state = 3;
- else
- {
- state = 0;
- next_i = first_fmac + 4;
- }
- }
- break;
-
- case 3:
- abort (); /* Should be unreachable. */
- }
-
- if (state == 3)
- {
- elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
- bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
-
- elf32_arm_section_data (sec)->erratumcount += 1;
-
- newerr->u.b.vfp_insn = veneer_of_insn;
-
- switch (span_type)
- {
- case 'a':
- newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
- break;
-
- default:
- abort ();
- }
-
- record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
+ state = 3;
+ else
+ {
+ state = 0;
+ next_i = first_fmac + 4;
+ }
+ }
+ break;
+
+ case 3:
+ abort (); /* Should be unreachable. */
+ }
+
+ if (state == 3)
+ {
+ elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
+ bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
+
+ elf32_arm_section_data (sec)->erratumcount += 1;
+
+ newerr->u.b.vfp_insn = veneer_of_insn;
+
+ switch (span_type)
+ {
+ case 'a':
+ newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
+ break;
+
+ default:
+ abort ();
+ }
+
+ record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
first_fmac);
- newerr->vma = -1;
+ newerr->vma = -1;
- newerr->next = sec_data->erratumlist;
- sec_data->erratumlist = newerr;
+ newerr->next = sec_data->erratumlist;
+ sec_data->erratumlist = newerr;
- state = 0;
- }
+ state = 0;
+ }
- i = next_i;
- }
- }
+ i = next_i;
+ }
+ }
if (contents != NULL
- && elf_section_data (sec)->this_hdr.contents != contents)
- free (contents);
+ && elf_section_data (sec)->this_hdr.contents != contents)
+ free (contents);
contents = NULL;
}
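The transitions into state 3 in the scan loop above hinge on bfd_arm_vfp11_antidependency reporting that the current instruction's write mask overlaps one of the registers recorded in regs[] by the earlier FMAC-pipeline instruction. That helper is outside this hunk; the following is only a minimal sketch of such an overlap test, with a made-up name and assuming one mask bit per register number (the real routine's register numbering and mask layout may differ).

/* Illustrative overlap test: nonzero if WRITEMASK covers any register
   recorded in REGS[0..NUMREGS-1], i.e. the new instruction overwrites
   an input of the pending FMAC-pipeline instruction.  Assumes one mask
   bit per register number, which may not match the real helper.  */
static int
vfp11_antidependency_sketch (unsigned int writemask, int *regs, int numregs)
{
  int i;

  for (i = 0; i < numregs; i++)
    if (writemask & (1u << regs[i]))
      return 1;

  return 0;
}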
@@ -6826,7 +6826,7 @@ bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
return;
tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
- (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
+ (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
for (sec = abfd->sections; sec != NULL; sec = sec->next)
{
@@ -6834,56 +6834,56 @@ bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
for (; errnode != NULL; errnode = errnode->next)
- {
- struct elf_link_hash_entry *myh;
- bfd_vma vma;
-
- switch (errnode->type)
- {
- case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
- case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
- /* Find veneer symbol. */
- sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
+ {
+ struct elf_link_hash_entry *myh;
+ bfd_vma vma;
+
+ switch (errnode->type)
+ {
+ case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
+ case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
+ /* Find veneer symbol. */
+ sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
errnode->u.b.veneer->u.v.id);
- myh = elf_link_hash_lookup
- (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
+ myh = elf_link_hash_lookup
+ (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
- if (myh == NULL)
- (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
- "`%s'"), abfd, tmp_name);
+ if (myh == NULL)
+ (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
+ "`%s'"), abfd, tmp_name);
- vma = myh->root.u.def.section->output_section->vma
- + myh->root.u.def.section->output_offset
- + myh->root.u.def.value;
+ vma = myh->root.u.def.section->output_section->vma
+ + myh->root.u.def.section->output_offset
+ + myh->root.u.def.value;
- errnode->u.b.veneer->vma = vma;
- break;
+ errnode->u.b.veneer->vma = vma;
+ break;
case VFP11_ERRATUM_ARM_VENEER:
- case VFP11_ERRATUM_THUMB_VENEER:
- /* Find return location. */
- sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
- errnode->u.v.id);
+ case VFP11_ERRATUM_THUMB_VENEER:
+ /* Find return location. */
+ sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
+ errnode->u.v.id);
- myh = elf_link_hash_lookup
- (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
+ myh = elf_link_hash_lookup
+ (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
- if (myh == NULL)
- (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
+ if (myh == NULL)
+ (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
"`%s'"), abfd, tmp_name);
- vma = myh->root.u.def.section->output_section->vma
- + myh->root.u.def.section->output_offset
- + myh->root.u.def.value;
+ vma = myh->root.u.def.section->output_section->vma
+ + myh->root.u.def.section->output_offset
+ + myh->root.u.def.value;
- errnode->u.v.branch->vma = vma;
- break;
+ errnode->u.v.branch->vma = vma;
+ break;
- default:
- abort ();
- }
- }
+ default:
+ abort ();
+ }
+ }
}
free (tmp_name);
@@ -6897,9 +6897,9 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
struct bfd_link_info *link_info,
int target1_is_rel,
char * target2_type,
- int fix_v4bx,
+ int fix_v4bx,
int use_blx,
- bfd_arm_vfp11_fix vfp11_fix,
+ bfd_arm_vfp11_fix vfp11_fix,
int no_enum_warn, int no_wchar_warn,
int pic_veneer, int fix_cortex_a8,
int fix_arm1176)
@@ -7394,6 +7394,10 @@ elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
splt = htab->root.iplt;
sgotplt = htab->root.igotplt;
+ /* NaCl uses a special first entry in .iplt too. */
+ if (htab->nacl_p && splt->size == 0)
+ splt->size += htab->plt_header_size;
+
/* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
}
@@ -7616,7 +7620,7 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
in the GOT. The offset accounts for the value produced by
adding to pc in the penultimate instruction of the PLT stub. */
got_displacement = (got_address
- - (plt_address + htab->plt_entry_size));
+ - (plt_address + htab->plt_entry_size));
/* NaCl does not support interworking at all. */
BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
@@ -7930,28 +7934,28 @@ calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
/* Calculate which part of the value to mask. */
if (residual == 0)
- shift = 0;
+ shift = 0;
else
- {
- int msb;
-
- /* Determine the most significant bit in the residual and
- align the resulting value to a 2-bit boundary. */
- for (msb = 30; msb >= 0; msb -= 2)
- if (residual & (3 << msb))
- break;
-
- /* The desired shift is now (msb - 6), or zero, whichever
- is the greater. */
- shift = msb - 6;
- if (shift < 0)
- shift = 0;
- }
+ {
+ int msb;
+
+ /* Determine the most significant bit in the residual and
+ align the resulting value to a 2-bit boundary. */
+ for (msb = 30; msb >= 0; msb -= 2)
+ if (residual & (3 << msb))
+ break;
+
+ /* The desired shift is now (msb - 6), or zero, whichever
+ is the greater. */
+ shift = msb - 6;
+ if (shift < 0)
+ shift = 0;
+ }
/* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
g_n = residual & (0xff << shift);
encoded_g_n = (g_n >> shift)
- | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
+ | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
/* Calculate the residual for the next time around. */
residual &= ~g_n;
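calculate_group_reloc_mask peels the value apart one group at a time; the standalone program below mirrors that same loop outside of BFD so the successive G_n values and their encoded constant+rotation forms can be printed for an arbitrary input. It is illustrative only and not part of the patch.

#include <stdio.h>

/* Standalone mirror of the G_n splitting above: each pass peels off the
   most significant 8-bit chunk (aligned to a 2-bit boundary) of the
   remaining residual and also shows it in the ARM 8-bit-constant-plus-
   rotation encoding.  */
int
main (void)
{
  unsigned int residual = 0x12345678;  /* arbitrary example value */
  int n;

  for (n = 0; n < 3; n++)
    {
      unsigned int g_n, encoded_g_n;
      int shift;

      if (residual == 0)
        shift = 0;
      else
        {
          int msb;

          /* Most significant bit of the residual, aligned down to a
             2-bit boundary.  */
          for (msb = 30; msb >= 0; msb -= 2)
            if (residual & (3u << msb))
              break;

          shift = msb - 6;
          if (shift < 0)
            shift = 0;
        }

      g_n = residual & (0xffu << shift);
      encoded_g_n = (g_n >> shift)
        | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);

      printf ("G_%d = 0x%08x  encoded = 0x%03x\n", n, g_n, encoded_g_n);
      residual &= ~g_n;
    }

  return 0;
}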
@@ -8163,7 +8167,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
branches in this object should go to it, except if the PLT is too
far away, in which case a long branch stub should be inserted. */
if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
- && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
+ && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
&& r_type != R_ARM_CALL
&& r_type != R_ARM_JUMP24
&& r_type != R_ARM_PLT32)
@@ -8580,36 +8584,36 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
bfd_signed_vma relocation;
insn = (bfd_get_16 (input_bfd, hit_data) << 16)
- | bfd_get_16 (input_bfd, hit_data + 2);
+ | bfd_get_16 (input_bfd, hit_data + 2);
- if (globals->use_rel)
- {
- signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
- | ((insn & (1 << 26)) >> 15);
- if (insn & 0xf00000)
- signed_addend = -signed_addend;
- }
+ if (globals->use_rel)
+ {
+ signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
+ | ((insn & (1 << 26)) >> 15);
+ if (insn & 0xf00000)
+ signed_addend = -signed_addend;
+ }
relocation = value + signed_addend;
relocation -= Pa (input_section->output_section->vma
- + input_section->output_offset
- + rel->r_offset);
+ + input_section->output_offset
+ + rel->r_offset);
- value = abs (relocation);
+ value = abs (relocation);
- if (value >= 0x1000)
- return bfd_reloc_overflow;
+ if (value >= 0x1000)
+ return bfd_reloc_overflow;
insn = (insn & 0xfb0f8f00) | (value & 0xff)
- | ((value & 0x700) << 4)
- | ((value & 0x800) << 15);
- if (relocation < 0)
- insn |= 0xa00000;
+ | ((value & 0x700) << 4)
+ | ((value & 0x800) << 15);
+ if (relocation < 0)
+ insn |= 0xa00000;
bfd_put_16 (input_bfd, insn >> 16, hit_data);
bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
- return bfd_reloc_ok;
+ return bfd_reloc_ok;
}
case R_ARM_THM_PC8:
@@ -8622,15 +8626,15 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
insn = bfd_get_16 (input_bfd, hit_data);
- if (globals->use_rel)
+ if (globals->use_rel)
addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
relocation = value + addend;
relocation -= Pa (input_section->output_section->vma
- + input_section->output_offset
- + rel->r_offset);
+ + input_section->output_offset
+ + rel->r_offset);
- value = abs (relocation);
+ value = abs (relocation);
/* We do not check for overflow of this reloc. Although strictly
speaking this is incorrect, it appears to be necessary in order
@@ -8643,7 +8647,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
bfd_put_16 (input_bfd, insn, hit_data);
- return bfd_reloc_ok;
+ return bfd_reloc_ok;
}
case R_ARM_THM_PC12:
@@ -8653,33 +8657,33 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
bfd_signed_vma relocation;
insn = (bfd_get_16 (input_bfd, hit_data) << 16)
- | bfd_get_16 (input_bfd, hit_data + 2);
+ | bfd_get_16 (input_bfd, hit_data + 2);
- if (globals->use_rel)
- {
- signed_addend = insn & 0xfff;
- if (!(insn & (1 << 23)))
- signed_addend = -signed_addend;
- }
+ if (globals->use_rel)
+ {
+ signed_addend = insn & 0xfff;
+ if (!(insn & (1 << 23)))
+ signed_addend = -signed_addend;
+ }
relocation = value + signed_addend;
relocation -= Pa (input_section->output_section->vma
- + input_section->output_offset
- + rel->r_offset);
+ + input_section->output_offset
+ + rel->r_offset);
- value = abs (relocation);
+ value = abs (relocation);
- if (value >= 0x1000)
- return bfd_reloc_overflow;
+ if (value >= 0x1000)
+ return bfd_reloc_overflow;
insn = (insn & 0xff7ff000) | value;
- if (relocation >= 0)
- insn |= (1 << 23);
+ if (relocation >= 0)
+ insn |= (1 << 23);
bfd_put_16 (input_bfd, insn >> 16, hit_data);
bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
- return bfd_reloc_ok;
+ return bfd_reloc_ok;
}
case R_ARM_THM_XPC22:
@@ -8688,7 +8692,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
/* Thumb BL (branch long instruction). */
{
bfd_vma relocation;
- bfd_vma reloc_sign;
+ bfd_vma reloc_sign;
bfd_boolean overflow = FALSE;
bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
@@ -8720,20 +8724,20 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
}
/* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
- with Thumb-1) involving the J1 and J2 bits. */
+ with Thumb-1) involving the J1 and J2 bits. */
if (globals->use_rel)
{
- bfd_vma s = (upper_insn & (1 << 10)) >> 10;
- bfd_vma upper = upper_insn & 0x3ff;
- bfd_vma lower = lower_insn & 0x7ff;
+ bfd_vma s = (upper_insn & (1 << 10)) >> 10;
+ bfd_vma upper = upper_insn & 0x3ff;
+ bfd_vma lower = lower_insn & 0x7ff;
bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
- bfd_vma i1 = j1 ^ s ? 0 : 1;
- bfd_vma i2 = j2 ^ s ? 0 : 1;
+ bfd_vma i1 = j1 ^ s ? 0 : 1;
+ bfd_vma i2 = j2 ^ s ? 0 : 1;
- addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
- /* Sign extend. */
- addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
+ addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
+ /* Sign extend. */
+ addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
signed_addend = addend;
}
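The REL branch above reassembles the Thumb-2 BL/BLX offset from S, J1 and J2, with I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S). Below is a self-contained decoder doing the equivalent on plain 16-bit halfwords; the function name is made up and fixed-width types stand in for bfd_vma.

#include <stdint.h>

/* Decode the signed byte offset of a Thumb-2 BL/BLX from its two
   halfwords, using the S/J1/J2 scheme handled above.  Illustrative
   equivalent of the REL-addend computation.  */
static int32_t
thumb2_bl_offset (uint16_t upper_insn, uint16_t lower_insn)
{
  uint32_t s  = (upper_insn >> 10) & 1;
  uint32_t hi = upper_insn & 0x3ff;            /* imm10 */
  uint32_t lo = lower_insn & 0x7ff;            /* imm11 */
  uint32_t j1 = (lower_insn >> 13) & 1;
  uint32_t j2 = (lower_insn >> 11) & 1;
  uint32_t i1 = 1 ^ (j1 ^ s);                  /* I1 = NOT (J1 XOR S) */
  uint32_t i2 = 1 ^ (j2 ^ s);                  /* I2 = NOT (J2 XOR S) */
  uint32_t imm = (s << 24) | (i1 << 23) | (i2 << 22) | (hi << 12) | (lo << 1);

  /* IMM is a 25-bit two's-complement value; bit 24 is the sign (S).  */
  return s ? (int32_t) imm - (1 << 25) : (int32_t) imm;
}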
@@ -8890,14 +8894,14 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
/* Put RELOCATION back into the insn. Assumes two's complement.
We use the Thumb-2 encoding, which is safe even if dealing with
a Thumb-1 instruction by virtue of our overflow check above. */
- reloc_sign = (signed_check < 0) ? 1 : 0;
+ reloc_sign = (signed_check < 0) ? 1 : 0;
upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
- | ((relocation >> 12) & 0x3ff)
- | (reloc_sign << 10);
+ | ((relocation >> 12) & 0x3ff)
+ | (reloc_sign << 10);
lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
- | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
- | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
- | ((relocation >> 1) & 0x7ff);
+ | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
+ | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
+ | ((relocation >> 1) & 0x7ff);
/* Put the relocated value back in the object file: */
bfd_put_16 (input_bfd, upper_insn, hit_data);
@@ -8951,7 +8955,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
/* ??? Should handle interworking? GCC might someday try to
use this for tail calls. */
- relocation = value + signed_addend;
+ relocation = value + signed_addend;
relocation -= (input_section->output_section->vma
+ input_section->output_offset
+ rel->r_offset);
@@ -9066,11 +9070,11 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
case R_ARM_GOTOFF32:
/* Relocation is relative to the start of the
- global offset table. */
+ global offset table. */
BFD_ASSERT (sgot != NULL);
if (sgot == NULL)
- return bfd_reloc_notsupported;
+ return bfd_reloc_notsupported;
/* If we are addressing a Thumb function, we need to adjust the
address by one, so that attempts to call the function pointer will
@@ -9079,10 +9083,10 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
value += 1;
/* Note that sgot->output_offset is not involved in this
- calculation. We always want the start of .got. If we
- define _GLOBAL_OFFSET_TABLE in a different way, as is
- permitted by the ABI, we might have to change this
- calculation. */
+ calculation. We always want the start of .got. If we
+ define _GLOBAL_OFFSET_TABLE in a different way, as is
+ permitted by the ABI, we might have to change this
+ calculation. */
value -= sgot->output_section->vma;
return _bfd_final_link_relocate (howto, input_bfd, input_section,
contents, rel->r_offset, value,
@@ -9093,7 +9097,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
BFD_ASSERT (sgot != NULL);
if (sgot == NULL)
- return bfd_reloc_notsupported;
+ return bfd_reloc_notsupported;
*unresolved_reloc_p = FALSE;
value = sgot->output_section->vma;
@@ -9104,7 +9108,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
case R_ARM_GOT32:
case R_ARM_GOT_PREL:
/* Relocation is to the entry for this symbol in the
- global offset table. */
+ global offset table. */
if (sgot == NULL)
return bfd_reloc_notsupported;
@@ -9157,13 +9161,13 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
else
{
if (dynreloc_st_type == STT_GNU_IFUNC)
- outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
+ outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
else if (info->shared &&
(ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
|| h->root.type != bfd_link_hash_undefweak))
- outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
- else
- outrel.r_info = 0;
+ outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
+ else
+ outrel.r_info = 0;
outrel.r_addend = dynreloc_value;
}
@@ -9216,7 +9220,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
+ sgot->output_offset
+ off);
if (dynreloc_st_type == STT_GNU_IFUNC)
- outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
+ outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
else
outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
@@ -9362,7 +9366,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
|| info->shared);
BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
- <= globals->root.sgotplt->size);
+ <= globals->root.sgotplt->size);
outrel.r_addend = 0;
outrel.r_offset = (globals->root.sgotplt->output_section->vma
@@ -9375,7 +9379,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
loc = sreloc->contents;
loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
BFD_ASSERT (loc + RELOC_SIZE (globals)
- <= sreloc->contents + sreloc->size);
+ <= sreloc->contents + sreloc->size);
SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
@@ -9383,7 +9387,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
the relocation index and the top bit set, or zero,
if we're binding now. For locals, it gets the
symbol's offset in the tls section. */
- bfd_put_32 (output_bfd,
+ bfd_put_32 (output_bfd,
!h ? value - elf_hash_table (info)->tls_sec->vma
: info->flags & DF_BIND_NOW ? 0
: 0x80000000 | ELF32_R_SYM (outrel.r_info),
@@ -9391,7 +9395,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
+ globals->sgotplt_jump_table_size);
/* Second word in the relocation is always zero. */
- bfd_put_32 (output_bfd, 0,
+ bfd_put_32 (output_bfd, 0,
globals->root.sgotplt->contents + offplt
+ globals->sgotplt_jump_table_size + 4);
}
@@ -9707,13 +9711,13 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
+ input_section->output_offset + rel->r_offset);
if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
- return bfd_reloc_overflow;
+ return bfd_reloc_overflow;
if (branch_type == ST_BRANCH_TO_THUMB)
value |= 1;
if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
- || r_type == R_ARM_MOVT_BREL)
+ || r_type == R_ARM_MOVT_BREL)
value >>= 16;
insn &= 0xfff0f000;
@@ -9757,13 +9761,13 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
+ input_section->output_offset + rel->r_offset);
if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
- return bfd_reloc_overflow;
+ return bfd_reloc_overflow;
if (branch_type == ST_BRANCH_TO_THUMB)
value |= 1;
if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
- || r_type == R_ARM_THM_MOVT_BREL)
+ || r_type == R_ARM_THM_MOVT_BREL)
value >>= 16;
insn &= 0xfbf08f00;
@@ -9789,129 +9793,129 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
case R_ARM_ALU_SB_G2:
{
bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
- bfd_vma pc = input_section->output_section->vma
+ bfd_vma pc = input_section->output_section->vma
+ input_section->output_offset + rel->r_offset;
- /* sb should be the origin of the *segment* containing the symbol.
- It is not clear how to obtain this OS-dependent value, so we
- make an arbitrary choice of zero. */
- bfd_vma sb = 0;
- bfd_vma residual;
- bfd_vma g_n;
+ /* sb should be the origin of the *segment* containing the symbol.
+ It is not clear how to obtain this OS-dependent value, so we
+ make an arbitrary choice of zero. */
+ bfd_vma sb = 0;
+ bfd_vma residual;
+ bfd_vma g_n;
bfd_signed_vma signed_value;
- int group = 0;
-
- /* Determine which group of bits to select. */
- switch (r_type)
- {
- case R_ARM_ALU_PC_G0_NC:
- case R_ARM_ALU_PC_G0:
- case R_ARM_ALU_SB_G0_NC:
- case R_ARM_ALU_SB_G0:
- group = 0;
- break;
-
- case R_ARM_ALU_PC_G1_NC:
- case R_ARM_ALU_PC_G1:
- case R_ARM_ALU_SB_G1_NC:
- case R_ARM_ALU_SB_G1:
- group = 1;
- break;
-
- case R_ARM_ALU_PC_G2:
- case R_ARM_ALU_SB_G2:
- group = 2;
- break;
-
- default:
- abort ();
- }
-
- /* If REL, extract the addend from the insn. If RELA, it will
- have already been fetched for us. */
+ int group = 0;
+
+ /* Determine which group of bits to select. */
+ switch (r_type)
+ {
+ case R_ARM_ALU_PC_G0_NC:
+ case R_ARM_ALU_PC_G0:
+ case R_ARM_ALU_SB_G0_NC:
+ case R_ARM_ALU_SB_G0:
+ group = 0;
+ break;
+
+ case R_ARM_ALU_PC_G1_NC:
+ case R_ARM_ALU_PC_G1:
+ case R_ARM_ALU_SB_G1_NC:
+ case R_ARM_ALU_SB_G1:
+ group = 1;
+ break;
+
+ case R_ARM_ALU_PC_G2:
+ case R_ARM_ALU_SB_G2:
+ group = 2;
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* If REL, extract the addend from the insn. If RELA, it will
+ have already been fetched for us. */
if (globals->use_rel)
- {
- int negative;
- bfd_vma constant = insn & 0xff;
- bfd_vma rotation = (insn & 0xf00) >> 8;
-
- if (rotation == 0)
- signed_addend = constant;
- else
- {
- /* Compensate for the fact that in the instruction, the
- rotation is stored in multiples of 2 bits. */
- rotation *= 2;
-
- /* Rotate "constant" right by "rotation" bits. */
- signed_addend = (constant >> rotation) |
- (constant << (8 * sizeof (bfd_vma) - rotation));
- }
-
- /* Determine if the instruction is an ADD or a SUB.
- (For REL, this determines the sign of the addend.) */
- negative = identify_add_or_sub (insn);
- if (negative == 0)
- {
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
- input_bfd, input_section,
- (long) rel->r_offset, howto->name);
- return bfd_reloc_overflow;
- }
-
- signed_addend *= negative;
- }
+ {
+ int negative;
+ bfd_vma constant = insn & 0xff;
+ bfd_vma rotation = (insn & 0xf00) >> 8;
+
+ if (rotation == 0)
+ signed_addend = constant;
+ else
+ {
+ /* Compensate for the fact that in the instruction, the
+ rotation is stored in multiples of 2 bits. */
+ rotation *= 2;
+
+ /* Rotate "constant" right by "rotation" bits. */
+ signed_addend = (constant >> rotation) |
+ (constant << (8 * sizeof (bfd_vma) - rotation));
+ }
+
+ /* Determine if the instruction is an ADD or a SUB.
+ (For REL, this determines the sign of the addend.) */
+ negative = identify_add_or_sub (insn);
+ if (negative == 0)
+ {
+ (*_bfd_error_handler)
+ (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
+ input_bfd, input_section,
+ (long) rel->r_offset, howto->name);
+ return bfd_reloc_overflow;
+ }
+
+ signed_addend *= negative;
+ }
/* Compute the value (X) to go in the place. */
- if (r_type == R_ARM_ALU_PC_G0_NC
- || r_type == R_ARM_ALU_PC_G1_NC
- || r_type == R_ARM_ALU_PC_G0
- || r_type == R_ARM_ALU_PC_G1
- || r_type == R_ARM_ALU_PC_G2)
- /* PC relative. */
- signed_value = value - pc + signed_addend;
- else
- /* Section base relative. */
- signed_value = value - sb + signed_addend;
-
- /* If the target symbol is a Thumb function, then set the
- Thumb bit in the address. */
+ if (r_type == R_ARM_ALU_PC_G0_NC
+ || r_type == R_ARM_ALU_PC_G1_NC
+ || r_type == R_ARM_ALU_PC_G0
+ || r_type == R_ARM_ALU_PC_G1
+ || r_type == R_ARM_ALU_PC_G2)
+ /* PC relative. */
+ signed_value = value - pc + signed_addend;
+ else
+ /* Section base relative. */
+ signed_value = value - sb + signed_addend;
+
+ /* If the target symbol is a Thumb function, then set the
+ Thumb bit in the address. */
if (branch_type == ST_BRANCH_TO_THUMB)
signed_value |= 1;
- /* Calculate the value of the relevant G_n, in encoded
- constant-with-rotation format. */
- g_n = calculate_group_reloc_mask (abs (signed_value), group,
- &residual);
-
- /* Check for overflow if required. */
- if ((r_type == R_ARM_ALU_PC_G0
- || r_type == R_ARM_ALU_PC_G1
- || r_type == R_ARM_ALU_PC_G2
- || r_type == R_ARM_ALU_SB_G0
- || r_type == R_ARM_ALU_SB_G1
- || r_type == R_ARM_ALU_SB_G2) && residual != 0)
- {
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
- input_bfd, input_section,
- (long) rel->r_offset, abs (signed_value), howto->name);
- return bfd_reloc_overflow;
- }
-
- /* Mask out the value and the ADD/SUB part of the opcode; take care
- not to destroy the S bit. */
- insn &= 0xff1ff000;
-
- /* Set the opcode according to whether the value to go in the
- place is negative. */
- if (signed_value < 0)
- insn |= 1 << 22;
- else
- insn |= 1 << 23;
-
- /* Encode the offset. */
- insn |= g_n;
+ /* Calculate the value of the relevant G_n, in encoded
+ constant-with-rotation format. */
+ g_n = calculate_group_reloc_mask (abs (signed_value), group,
+ &residual);
+
+ /* Check for overflow if required. */
+ if ((r_type == R_ARM_ALU_PC_G0
+ || r_type == R_ARM_ALU_PC_G1
+ || r_type == R_ARM_ALU_PC_G2
+ || r_type == R_ARM_ALU_SB_G0
+ || r_type == R_ARM_ALU_SB_G1
+ || r_type == R_ARM_ALU_SB_G2) && residual != 0)
+ {
+ (*_bfd_error_handler)
+ (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
+ input_bfd, input_section,
+ (long) rel->r_offset, abs (signed_value), howto->name);
+ return bfd_reloc_overflow;
+ }
+
+ /* Mask out the value and the ADD/SUB part of the opcode; take care
+ not to destroy the S bit. */
+ insn &= 0xff1ff000;
+
+ /* Set the opcode according to whether the value to go in the
+ place is negative. */
+ if (signed_value < 0)
+ insn |= 1 << 22;
+ else
+ insn |= 1 << 23;
+
+ /* Encode the offset. */
+ insn |= g_n;
bfd_put_32 (input_bfd, insn, hit_data);
}
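In the use_rel path of the ALU group relocations above, the addend is recovered from the instruction itself: an ARM data-processing immediate is an 8-bit constant rotated right by twice the 4-bit rotation field, and identify_add_or_sub then supplies the sign, since an ADD encodes a positive addend and a SUB a negative one. A small sketch of just the rotation decode, on a fixed 32-bit width and under an invented name:

#include <stdint.h>

/* Decode an ARM data-processing immediate: bits [7:0] are an 8-bit
   constant, bits [11:8] a rotation applied as rotate-right by twice
   that amount.  Mirrors the REL-addend extraction above, but on a
   fixed 32-bit width rather than bfd_vma.  */
static uint32_t
arm_dp_immediate (uint32_t insn)
{
  uint32_t constant = insn & 0xff;
  uint32_t rotation = ((insn >> 8) & 0xf) * 2;

  if (rotation == 0)
    return constant;
  return (constant >> rotation) | (constant << (32 - rotation));
}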
@@ -9925,76 +9929,76 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
case R_ARM_LDR_SB_G2:
{
bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
- bfd_vma pc = input_section->output_section->vma
+ bfd_vma pc = input_section->output_section->vma
+ input_section->output_offset + rel->r_offset;
- bfd_vma sb = 0; /* See note above. */
- bfd_vma residual;
+ bfd_vma sb = 0; /* See note above. */
+ bfd_vma residual;
bfd_signed_vma signed_value;
- int group = 0;
-
- /* Determine which groups of bits to calculate. */
- switch (r_type)
- {
- case R_ARM_LDR_PC_G0:
- case R_ARM_LDR_SB_G0:
- group = 0;
- break;
-
- case R_ARM_LDR_PC_G1:
- case R_ARM_LDR_SB_G1:
- group = 1;
- break;
-
- case R_ARM_LDR_PC_G2:
- case R_ARM_LDR_SB_G2:
- group = 2;
- break;
-
- default:
- abort ();
- }
-
- /* If REL, extract the addend from the insn. If RELA, it will
- have already been fetched for us. */
+ int group = 0;
+
+ /* Determine which groups of bits to calculate. */
+ switch (r_type)
+ {
+ case R_ARM_LDR_PC_G0:
+ case R_ARM_LDR_SB_G0:
+ group = 0;
+ break;
+
+ case R_ARM_LDR_PC_G1:
+ case R_ARM_LDR_SB_G1:
+ group = 1;
+ break;
+
+ case R_ARM_LDR_PC_G2:
+ case R_ARM_LDR_SB_G2:
+ group = 2;
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* If REL, extract the addend from the insn. If RELA, it will
+ have already been fetched for us. */
if (globals->use_rel)
- {
- int negative = (insn & (1 << 23)) ? 1 : -1;
- signed_addend = negative * (insn & 0xfff);
- }
+ {
+ int negative = (insn & (1 << 23)) ? 1 : -1;
+ signed_addend = negative * (insn & 0xfff);
+ }
/* Compute the value (X) to go in the place. */
- if (r_type == R_ARM_LDR_PC_G0
- || r_type == R_ARM_LDR_PC_G1
- || r_type == R_ARM_LDR_PC_G2)
- /* PC relative. */
- signed_value = value - pc + signed_addend;
- else
- /* Section base relative. */
- signed_value = value - sb + signed_addend;
-
- /* Calculate the value of the relevant G_{n-1} to obtain
- the residual at that stage. */
- calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
-
- /* Check for overflow. */
- if (residual >= 0x1000)
- {
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
- input_bfd, input_section,
- (long) rel->r_offset, abs (signed_value), howto->name);
- return bfd_reloc_overflow;
- }
-
- /* Mask out the value and U bit. */
- insn &= 0xff7ff000;
-
- /* Set the U bit if the value to go in the place is non-negative. */
- if (signed_value >= 0)
- insn |= 1 << 23;
-
- /* Encode the offset. */
- insn |= residual;
+ if (r_type == R_ARM_LDR_PC_G0
+ || r_type == R_ARM_LDR_PC_G1
+ || r_type == R_ARM_LDR_PC_G2)
+ /* PC relative. */
+ signed_value = value - pc + signed_addend;
+ else
+ /* Section base relative. */
+ signed_value = value - sb + signed_addend;
+
+ /* Calculate the value of the relevant G_{n-1} to obtain
+ the residual at that stage. */
+ calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
+
+ /* Check for overflow. */
+ if (residual >= 0x1000)
+ {
+ (*_bfd_error_handler)
+ (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
+ input_bfd, input_section,
+ (long) rel->r_offset, abs (signed_value), howto->name);
+ return bfd_reloc_overflow;
+ }
+
+ /* Mask out the value and U bit. */
+ insn &= 0xff7ff000;
+
+ /* Set the U bit if the value to go in the place is non-negative. */
+ if (signed_value >= 0)
+ insn |= 1 << 23;
+
+ /* Encode the offset. */
+ insn |= residual;
bfd_put_32 (input_bfd, insn, hit_data);
}
@@ -10008,76 +10012,76 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
case R_ARM_LDRS_SB_G2:
{
bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
- bfd_vma pc = input_section->output_section->vma
+ bfd_vma pc = input_section->output_section->vma
+ input_section->output_offset + rel->r_offset;
- bfd_vma sb = 0; /* See note above. */
- bfd_vma residual;
+ bfd_vma sb = 0; /* See note above. */
+ bfd_vma residual;
bfd_signed_vma signed_value;
- int group = 0;
-
- /* Determine which groups of bits to calculate. */
- switch (r_type)
- {
- case R_ARM_LDRS_PC_G0:
- case R_ARM_LDRS_SB_G0:
- group = 0;
- break;
-
- case R_ARM_LDRS_PC_G1:
- case R_ARM_LDRS_SB_G1:
- group = 1;
- break;
-
- case R_ARM_LDRS_PC_G2:
- case R_ARM_LDRS_SB_G2:
- group = 2;
- break;
-
- default:
- abort ();
- }
-
- /* If REL, extract the addend from the insn. If RELA, it will
- have already been fetched for us. */
+ int group = 0;
+
+ /* Determine which groups of bits to calculate. */
+ switch (r_type)
+ {
+ case R_ARM_LDRS_PC_G0:
+ case R_ARM_LDRS_SB_G0:
+ group = 0;
+ break;
+
+ case R_ARM_LDRS_PC_G1:
+ case R_ARM_LDRS_SB_G1:
+ group = 1;
+ break;
+
+ case R_ARM_LDRS_PC_G2:
+ case R_ARM_LDRS_SB_G2:
+ group = 2;
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* If REL, extract the addend from the insn. If RELA, it will
+ have already been fetched for us. */
if (globals->use_rel)
- {
- int negative = (insn & (1 << 23)) ? 1 : -1;
- signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
- }
+ {
+ int negative = (insn & (1 << 23)) ? 1 : -1;
+ signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
+ }
/* Compute the value (X) to go in the place. */
- if (r_type == R_ARM_LDRS_PC_G0
- || r_type == R_ARM_LDRS_PC_G1
- || r_type == R_ARM_LDRS_PC_G2)
- /* PC relative. */
- signed_value = value - pc + signed_addend;
- else
- /* Section base relative. */
- signed_value = value - sb + signed_addend;
-
- /* Calculate the value of the relevant G_{n-1} to obtain
- the residual at that stage. */
- calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
-
- /* Check for overflow. */
- if (residual >= 0x100)
- {
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
- input_bfd, input_section,
- (long) rel->r_offset, abs (signed_value), howto->name);
- return bfd_reloc_overflow;
- }
-
- /* Mask out the value and U bit. */
- insn &= 0xff7ff0f0;
-
- /* Set the U bit if the value to go in the place is non-negative. */
- if (signed_value >= 0)
- insn |= 1 << 23;
-
- /* Encode the offset. */
- insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
+ if (r_type == R_ARM_LDRS_PC_G0
+ || r_type == R_ARM_LDRS_PC_G1
+ || r_type == R_ARM_LDRS_PC_G2)
+ /* PC relative. */
+ signed_value = value - pc + signed_addend;
+ else
+ /* Section base relative. */
+ signed_value = value - sb + signed_addend;
+
+ /* Calculate the value of the relevant G_{n-1} to obtain
+ the residual at that stage. */
+ calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
+
+ /* Check for overflow. */
+ if (residual >= 0x100)
+ {
+ (*_bfd_error_handler)
+ (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
+ input_bfd, input_section,
+ (long) rel->r_offset, abs (signed_value), howto->name);
+ return bfd_reloc_overflow;
+ }
+
+ /* Mask out the value and U bit. */
+ insn &= 0xff7ff0f0;
+
+ /* Set the U bit if the value to go in the place is non-negative. */
+ if (signed_value >= 0)
+ insn |= 1 << 23;
+
+ /* Encode the offset. */
+ insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
bfd_put_32 (input_bfd, insn, hit_data);
}
@@ -10091,78 +10095,78 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
case R_ARM_LDC_SB_G2:
{
bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
- bfd_vma pc = input_section->output_section->vma
+ bfd_vma pc = input_section->output_section->vma
+ input_section->output_offset + rel->r_offset;
- bfd_vma sb = 0; /* See note above. */
- bfd_vma residual;
+ bfd_vma sb = 0; /* See note above. */
+ bfd_vma residual;
bfd_signed_vma signed_value;
- int group = 0;
-
- /* Determine which groups of bits to calculate. */
- switch (r_type)
- {
- case R_ARM_LDC_PC_G0:
- case R_ARM_LDC_SB_G0:
- group = 0;
- break;
-
- case R_ARM_LDC_PC_G1:
- case R_ARM_LDC_SB_G1:
- group = 1;
- break;
-
- case R_ARM_LDC_PC_G2:
- case R_ARM_LDC_SB_G2:
- group = 2;
- break;
-
- default:
- abort ();
- }
-
- /* If REL, extract the addend from the insn. If RELA, it will
- have already been fetched for us. */
+ int group = 0;
+
+ /* Determine which groups of bits to calculate. */
+ switch (r_type)
+ {
+ case R_ARM_LDC_PC_G0:
+ case R_ARM_LDC_SB_G0:
+ group = 0;
+ break;
+
+ case R_ARM_LDC_PC_G1:
+ case R_ARM_LDC_SB_G1:
+ group = 1;
+ break;
+
+ case R_ARM_LDC_PC_G2:
+ case R_ARM_LDC_SB_G2:
+ group = 2;
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* If REL, extract the addend from the insn. If RELA, it will
+ have already been fetched for us. */
if (globals->use_rel)
- {
- int negative = (insn & (1 << 23)) ? 1 : -1;
- signed_addend = negative * ((insn & 0xff) << 2);
- }
+ {
+ int negative = (insn & (1 << 23)) ? 1 : -1;
+ signed_addend = negative * ((insn & 0xff) << 2);
+ }
/* Compute the value (X) to go in the place. */
- if (r_type == R_ARM_LDC_PC_G0
- || r_type == R_ARM_LDC_PC_G1
- || r_type == R_ARM_LDC_PC_G2)
- /* PC relative. */
- signed_value = value - pc + signed_addend;
- else
- /* Section base relative. */
- signed_value = value - sb + signed_addend;
-
- /* Calculate the value of the relevant G_{n-1} to obtain
- the residual at that stage. */
- calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
-
- /* Check for overflow. (The absolute value to go in the place must be
- divisible by four and, after having been divided by four, must
- fit in eight bits.) */
- if ((residual & 0x3) != 0 || residual >= 0x400)
- {
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
- input_bfd, input_section,
- (long) rel->r_offset, abs (signed_value), howto->name);
- return bfd_reloc_overflow;
- }
-
- /* Mask out the value and U bit. */
- insn &= 0xff7fff00;
-
- /* Set the U bit if the value to go in the place is non-negative. */
- if (signed_value >= 0)
- insn |= 1 << 23;
-
- /* Encode the offset. */
- insn |= residual >> 2;
+ if (r_type == R_ARM_LDC_PC_G0
+ || r_type == R_ARM_LDC_PC_G1
+ || r_type == R_ARM_LDC_PC_G2)
+ /* PC relative. */
+ signed_value = value - pc + signed_addend;
+ else
+ /* Section base relative. */
+ signed_value = value - sb + signed_addend;
+
+ /* Calculate the value of the relevant G_{n-1} to obtain
+ the residual at that stage. */
+ calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
+
+ /* Check for overflow. (The absolute value to go in the place must be
+ divisible by four and, after having been divided by four, must
+ fit in eight bits.) */
+ if ((residual & 0x3) != 0 || residual >= 0x400)
+ {
+ (*_bfd_error_handler)
+ (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
+ input_bfd, input_section,
+ (long) rel->r_offset, abs (signed_value), howto->name);
+ return bfd_reloc_overflow;
+ }
+
+ /* Mask out the value and U bit. */
+ insn &= 0xff7fff00;
+
+ /* Set the U bit if the value to go in the place is non-negative. */
+ if (signed_value >= 0)
+ insn |= 1 << 23;
+
+ /* Encode the offset. */
+ insn |= residual >> 2;
bfd_put_32 (input_bfd, insn, hit_data);
}
@@ -10314,8 +10318,8 @@ elf32_arm_relocate_section (bfd * output_bfd,
r_type = arm_real_reloc_type (globals, r_type);
if ( r_type == R_ARM_GNU_VTENTRY
- || r_type == R_ARM_GNU_VTINHERIT)
- continue;
+ || r_type == R_ARM_GNU_VTINHERIT)
+ continue;
bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
howto = bfd_reloc.howto;
@@ -10508,10 +10512,10 @@ elf32_arm_relocate_section (bfd * output_bfd,
}
/* We call elf32_arm_final_link_relocate unless we're completely
- done, i.e., the relaxation produced the final output we want,
- and we won't let anybody mess with it. Also, we have to do
- addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
- both in relaxed and non-relaxed cases */
+ done, i.e., the relaxation produced the final output we want,
+ and we won't let anybody mess with it. Also, we have to do
+ addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
+ both in relaxed and non-relaxed cases */
if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
|| (IS_ARM_TLS_GNU_RELOC (r_type)
&& !((h ? elf32_arm_hash_entry (h)->tls_type :
@@ -10539,8 +10543,8 @@ elf32_arm_relocate_section (bfd * output_bfd,
because such sections are not SEC_ALLOC and thus ld.so will
not process them. */
if (unresolved_reloc
- && !((input_section->flags & SEC_DEBUGGING) != 0
- && h->def_dynamic)
+ && !((input_section->flags & SEC_DEBUGGING) != 0
+ && h->def_dynamic)
&& _bfd_elf_section_offset (output_bfd, info, input_section,
rel->r_offset) != (bfd_vma) -1)
{
@@ -10687,7 +10691,7 @@ insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
- codes which have been inlined into the index).
+ codes which have been inlined into the index).
If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
@@ -10713,7 +10717,7 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
asection *sec;
for (sec = inp->sections; sec != NULL; sec = sec->next)
- {
+ {
struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
@@ -10723,15 +10727,15 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
if (elf_sec->linked_to)
{
Elf_Internal_Shdr *linked_hdr
- = &elf_section_data (elf_sec->linked_to)->this_hdr;
+ = &elf_section_data (elf_sec->linked_to)->this_hdr;
struct _arm_elf_section_data *linked_sec_arm_data
- = get_arm_elf_section_data (linked_hdr->bfd_section);
+ = get_arm_elf_section_data (linked_hdr->bfd_section);
if (linked_sec_arm_data == NULL)
- continue;
+ continue;
/* Link this .ARM.exidx section back from the text section it
- describes. */
+ describes. */
linked_sec_arm_data->u.text.arm_exidx_sec = sec;
}
}
@@ -10756,7 +10760,7 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
bfd *ibfd;
if (arm_data == NULL)
- continue;
+ continue;
exidx_sec = arm_data->u.text.arm_exidx_sec;
if (exidx_sec == NULL)
@@ -10780,11 +10784,11 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
hdr = &elf_section_data (exidx_sec)->this_hdr;
if (hdr->sh_type != SHT_ARM_EXIDX)
- continue;
+ continue;
exidx_arm_data = get_arm_elf_section_data (exidx_sec);
if (exidx_arm_data == NULL)
- continue;
+ continue;
ibfd = exidx_sec->owner;
@@ -10834,7 +10838,7 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
/* Free contents if we allocated it ourselves. */
if (contents != hdr->contents)
- free (contents);
+ free (contents);
/* Record edits to be applied later (in elf32_arm_write_section). */
exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
@@ -11061,7 +11065,7 @@ elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
return FALSE;
/* If the src and dest have different interworking flags
- then turn off the interworking bit. */
+ then turn off the interworking bit. */
if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
{
if (out_flags & EF_ARM_INTERWORK)
@@ -11493,7 +11497,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
{
if (out_attr[Tag_MPextension_use].i != 0
&& out_attr[Tag_MPextension_use_legacy].i
- != out_attr[Tag_MPextension_use].i)
+ != out_attr[Tag_MPextension_use].i)
{
_bfd_error_handler
(_("Error: %B has both the current and legacy "
@@ -11681,7 +11685,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
/* 0 will merge with anything.
'A' and 'S' merge to 'A'.
'R' and 'S' merge to 'R'.
- 'M' and 'A|R|S' is an error. */
+ 'M' and 'A|R|S' is an error. */
if (out_attr[i].i == 0
|| (out_attr[i].i == 'S'
&& (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
@@ -11779,7 +11783,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
if (regs < vfp_versions[out_attr[i].i].regs)
regs = vfp_versions[out_attr[i].i].regs;
/* This assumes all possible supersets are also a valid
- options. */
+ options. */
for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
{
if (regs == vfp_versions[newval].regs
@@ -11795,7 +11799,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
{
/* It's sometimes ok to mix different configs, so this is only
- a warning. */
+ a warning. */
_bfd_error_handler
(_("Warning: %B: Conflicting platform configuration"), ibfd);
}
@@ -12481,7 +12485,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
/* Could be done earlier, if h were already available. */
r_type = elf32_arm_tls_transition (info, r_type, h);
switch (r_type)
- {
+ {
case R_ARM_GOT32:
case R_ARM_GOT_PREL:
case R_ARM_TLS_GD32:
@@ -12524,7 +12528,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
}
/* If a variable is accessed with both tls methods, two
- slots may be created. */
+ slots may be created. */
if (GOT_TLS_GD_ANY_P (old_tls_type)
&& GOT_TLS_GD_ANY_P (tls_type))
tls_type |= old_tls_type;
@@ -12537,9 +12541,9 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
tls_type |= old_tls_type;
/* If the symbol is accessed in both IE and GDESC
- method, we're able to relax. Turn off the GDESC flag,
- without messing up with any other kind of tls types
- that may be involved */
+ method, we're able to relax. Turn off the GDESC flag,
+ without messing up with any other kind of tls types
+ that may be involved */
if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
tls_type &= ~GOT_TLS_GDESC;
@@ -12636,22 +12640,22 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
may_need_local_target_p = TRUE;
break;
- /* This relocation describes the C++ object vtable hierarchy.
- Reconstruct it for later use during GC. */
- case R_ARM_GNU_VTINHERIT:
- if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
- return FALSE;
- break;
-
- /* This relocation describes which C++ vtable entries are actually
- used. Record for later use during GC. */
- case R_ARM_GNU_VTENTRY:
- BFD_ASSERT (h != NULL);
- if (h != NULL
- && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
- return FALSE;
- break;
- }
+ /* This relocation describes the C++ object vtable hierarchy.
+ Reconstruct it for later use during GC. */
+ case R_ARM_GNU_VTINHERIT:
+ if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
+ return FALSE;
+ break;
+
+ /* This relocation describes which C++ vtable entries are actually
+ used. Record for later use during GC. */
+ case R_ARM_GNU_VTENTRY:
+ BFD_ASSERT (h != NULL);
+ if (h != NULL
+ && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
+ return FALSE;
+ break;
+ }
if (h != NULL)
{
@@ -12909,7 +12913,7 @@ elf32_arm_find_nearest_line (bfd * abfd,
/* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
- section, symbols, offset,
+ section, symbols, offset,
filename_ptr, functionname_ptr,
line_ptr, NULL, 0,
& elf_tdata (abfd)->dwarf2_find_line_info))
@@ -13220,18 +13224,18 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
s->size += 4;
else
{
- if (tls_type & GOT_TLS_GDESC)
- {
+ if (tls_type & GOT_TLS_GDESC)
+ {
/* R_ARM_TLS_DESC needs 2 GOT slots. */
- eh->tlsdesc_got
+ eh->tlsdesc_got
= (htab->root.sgotplt->size
- elf32_arm_compute_jump_table_size (htab));
- htab->root.sgotplt->size += 8;
- h->got.offset = (bfd_vma) -2;
+ htab->root.sgotplt->size += 8;
+ h->got.offset = (bfd_vma) -2;
/* plt.got_offset needs to know there's a TLS_DESC
reloc in the middle of .got.plt. */
- htab->num_tls_desc++;
- }
+ htab->num_tls_desc++;
+ }
if (tls_type & GOT_TLS_GD)
{
@@ -13344,11 +13348,11 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
if (info->shared || htab->root.is_relocatable_executable)
{
/* The only relocs that use pc_count are R_ARM_REL32 and
- R_ARM_REL32_NOI, which will appear on something like
- ".long foo - .". We want calls to protected symbols to resolve
- directly to the function rather than going via the plt. If people
- want function pointer comparisons to work as expected then they
- should avoid writing assembly like ".long foo - .". */
+ R_ARM_REL32_NOI, which will appear on something like
+ ".long foo - .". We want calls to protected symbols to resolve
+ directly to the function rather than going via the plt. If people
+ want function pointer comparisons to work as expected then they
+ should avoid writing assembly like ".long foo - .". */
if (SYMBOL_CALLS_LOCAL (info, h))
{
struct elf_dyn_relocs **pp;
@@ -13378,7 +13382,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
}
/* Also discard relocs on undefined weak syms with non-default
- visibility. */
+ visibility. */
if (eh->dyn_relocs != NULL
&& h->root.type == bfd_link_hash_undefweak)
{
@@ -13549,7 +13553,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
struct elf_dyn_relocs *p;
for (p = (struct elf_dyn_relocs *)
- elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
+ elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
{
if (!bfd_is_abs_section (p->sec)
&& bfd_is_abs_section (p->sec->output_section))
@@ -13645,7 +13649,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
*local_got = (bfd_vma) -2;
/* plt.got_offset needs to know there's a TLS_DESC
reloc in the middle of .got.plt. */
- htab->num_tls_desc++;
+ htab->num_tls_desc++;
}
if (*local_tls_type & GOT_TLS_IE)
s->size += 4;
@@ -13674,7 +13678,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
|| *local_tls_type & GOT_TLS_GD)
elf32_arm_allocate_dynrelocs (info, srel, 1);
-
+
if (info->shared && *local_tls_type & GOT_TLS_GDESC)
{
elf32_arm_allocate_dynrelocs (info,
@@ -13715,8 +13719,8 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
|| !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
- /* xgettext:c-format */
- _bfd_error_handler (_("Errors encountered processing file %s"),
+ /* xgettext:c-format */
+ _bfd_error_handler (_("Errors encountered processing file %s"),
ibfd->filename);
}
@@ -13740,7 +13744,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
htab->root.splt->size += htab->plt_entry_size;
/* If we're not using lazy TLS relocations, don't generate the
- PLT and GOT entries they require. */
+ PLT and GOT entries they require. */
if (!(info->flags & DF_BIND_NOW))
{
htab->dt_tlsdesc_got = htab->root.sgot->size;
@@ -13894,7 +13898,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
- struct bfd_link_info *info)
+ struct bfd_link_info *info)
{
asection *tls_sec;
@@ -13911,22 +13915,22 @@ elf32_arm_always_size_sections (bfd *output_bfd,
(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
if (tlsbase)
- {
- struct bfd_link_hash_entry *bh = NULL;
+ {
+ struct bfd_link_hash_entry *bh = NULL;
const struct elf_backend_data *bed
- = get_elf_backend_data (output_bfd);
+ = get_elf_backend_data (output_bfd);
- if (!(_bfd_generic_link_add_one_symbol
+ if (!(_bfd_generic_link_add_one_symbol
(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
tls_sec, 0, NULL, FALSE,
bed->collect, &bh)))
return FALSE;
- tlsbase->type = STT_TLS;
- tlsbase = (struct elf_link_hash_entry *)bh;
- tlsbase->def_regular = 1;
- tlsbase->other = STV_HIDDEN;
- (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
+ tlsbase->type = STT_TLS;
+ tlsbase = (struct elf_link_hash_entry *)bh;
+ tlsbase->def_regular = 1;
+ tlsbase->other = STV_HIDDEN;
+ (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
}
}
return TRUE;
@@ -14034,6 +14038,38 @@ arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
}
}
+/* Install the special first PLT entry for elf32-arm-nacl. Unlike
+ other variants, NaCl needs this entry in a static executable's
+ .iplt too. When we're handling that case, GOT_DISPLACEMENT is
+ zero. For .iplt really only the last bundle is useful, and .iplt
+ could have a shorter first entry, with each individual PLT entry's
+ relative branch calculated differently so it targets the last
+ bundle instead of the instruction before it (labelled .Lplt_tail
+ above). But it's simpler to keep the size and layout of PLT0
+ consistent with the dynamic case, at the cost of some dead code at
+ the start of .iplt and the one dead store to the stack at the start
+ of .Lplt_tail. */
+static void
+arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
+ asection *plt, bfd_vma got_displacement)
+{
+ unsigned int i;
+
+ put_arm_insn (htab, output_bfd,
+ elf32_arm_nacl_plt0_entry[0]
+ | arm_movw_immediate (got_displacement),
+ plt->contents + 0);
+ put_arm_insn (htab, output_bfd,
+ elf32_arm_nacl_plt0_entry[1]
+ | arm_movt_immediate (got_displacement),
+ plt->contents + 4);
+
+ for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
+ put_arm_insn (htab, output_bfd,
+ elf32_arm_nacl_plt0_entry[i],
+ plt->contents + (i * 4));
+}
+
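arm_nacl_put_plt0 relies on arm_movw_immediate and arm_movt_immediate to fold GOT_DISPLACEMENT into the first two template words. In the ARM encoding a MOVW/MOVT 16-bit immediate is split into imm4 (instruction bits 19:16) and imm12 (bits 11:0); the helpers presumably follow that split, roughly as sketched below. The names and bodies here are illustrative, not the file's actual definitions.

/* Place the low/high half of VALUE into the MOVW/MOVT immediate
   fields (imm4 in bits 19:16, imm12 in bits 11:0).  Illustrative only.  */
static unsigned int
movw_imm_sketch (unsigned int value)
{
  return (value & 0x0fff) | ((value & 0xf000) << 4);
}

static unsigned int
movt_imm_sketch (unsigned int value)
{
  value >>= 16;
  return (value & 0x0fff) | ((value & 0xf000) << 4);
}

ORed into elf32_arm_nacl_plt0_entry[0] and [1] as above, the two instructions materialize the 32-bit displacement sixteen bits at a time.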
/* Finish up the dynamic sections. */
static bfd_boolean
@@ -14199,16 +14235,16 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info
break;
case DT_TLSDESC_PLT:
- s = htab->root.splt;
+ s = htab->root.splt;
dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
+ htab->dt_tlsdesc_plt);
bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
break;
case DT_TLSDESC_GOT:
- s = htab->root.sgot;
+ s = htab->root.sgot;
dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
- + htab->dt_tlsdesc_got);
+ + htab->dt_tlsdesc_got);
bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
break;
@@ -14272,24 +14308,8 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info
htab->srelplt2->contents);
}
else if (htab->nacl_p)
- {
- unsigned int i;
-
- got_displacement = got_address + 8 - (plt_address + 16);
-
- put_arm_insn (htab, output_bfd,
- elf32_arm_nacl_plt0_entry[0]
- | arm_movw_immediate (got_displacement),
- splt->contents + 0);
- put_arm_insn (htab, output_bfd,
- elf32_arm_nacl_plt0_entry[1]
- | arm_movt_immediate (got_displacement),
- splt->contents + 4);
- for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
- put_arm_insn (htab, output_bfd,
- elf32_arm_nacl_plt0_entry[i],
- splt->contents + (i * 4));
- }
+ arm_nacl_put_plt0 (htab, output_bfd, splt,
+ got_address + 8 - (plt_address + 16));
else
{
got_displacement = got_address - (plt_address + 16);
@@ -14382,6 +14402,10 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info
}
}
+ if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
+ /* NaCl uses a special first entry in .iplt too. */
+ arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
+
/* Fill in the first three entries in the global offset table. */
if (sgot)
{
@@ -14965,6 +14989,15 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd,
#endif
}
}
+ if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
+ {
+ /* NaCl uses a special first entry in .iplt too. */
+ osi.sec = htab->root.iplt;
+ osi.sec_shndx = (_bfd_elf_section_from_bfd_section
+ (output_bfd, osi.sec->output_section));
+ if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
+ return FALSE;
+ }
if ((htab->root.splt && htab->root.splt->size > 0)
|| (htab->root.iplt && htab->root.iplt->size > 0))
{
@@ -15101,7 +15134,7 @@ struct a8_branch_to_stub_data
static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
- void *in_arg)
+ void *in_arg)
{
struct elf32_arm_stub_hash_entry *stub_entry;
struct a8_branch_to_stub_data *data;
@@ -15240,82 +15273,82 @@ elf32_arm_write_section (bfd *output_bfd,
unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
for (errnode = arm_data->erratumlist; errnode != 0;
- errnode = errnode->next)
- {
- bfd_vma target = errnode->vma - offset;
-
- switch (errnode->type)
- {
- case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
- {
- bfd_vma branch_to_veneer;
- /* Original condition code of instruction, plus bit mask for
- ARM B instruction. */
- unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
- | 0x0a000000;
+ errnode = errnode->next)
+ {
+ bfd_vma target = errnode->vma - offset;
+
+ switch (errnode->type)
+ {
+ case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
+ {
+ bfd_vma branch_to_veneer;
+ /* Original condition code of instruction, plus bit mask for
+ ARM B instruction. */
+ unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
+ | 0x0a000000;
/* The instruction is before the label. */
target -= 4;
/* Above offset included in -4 below. */
branch_to_veneer = errnode->u.b.veneer->vma
- - errnode->vma - 4;
+ - errnode->vma - 4;
if ((signed) branch_to_veneer < -(1 << 25)
|| (signed) branch_to_veneer >= (1 << 25))
(*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
"range"), output_bfd);
- insn |= (branch_to_veneer >> 2) & 0xffffff;
- contents[endianflip ^ target] = insn & 0xff;
- contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
- contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
- contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
- }
- break;
+ insn |= (branch_to_veneer >> 2) & 0xffffff;
+ contents[endianflip ^ target] = insn & 0xff;
+ contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
+ contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
+ contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
+ }
+ break;
case VFP11_ERRATUM_ARM_VENEER:
- {
- bfd_vma branch_from_veneer;
- unsigned int insn;
+ {
+ bfd_vma branch_from_veneer;
+ unsigned int insn;
- /* Take size of veneer into account. */
- branch_from_veneer = errnode->u.v.branch->vma
- - errnode->vma - 12;
+ /* Take size of veneer into account. */
+ branch_from_veneer = errnode->u.v.branch->vma
+ - errnode->vma - 12;
if ((signed) branch_from_veneer < -(1 << 25)
|| (signed) branch_from_veneer >= (1 << 25))
(*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
"range"), output_bfd);
- /* Original instruction. */
- insn = errnode->u.v.branch->u.b.vfp_insn;
- contents[endianflip ^ target] = insn & 0xff;
- contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
- contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
- contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
-
- /* Branch back to insn after original insn. */
- insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
- contents[endianflip ^ (target + 4)] = insn & 0xff;
- contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
- contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
- contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
- }
- break;
+ /* Original instruction. */
+ insn = errnode->u.v.branch->u.b.vfp_insn;
+ contents[endianflip ^ target] = insn & 0xff;
+ contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
+ contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
+ contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
+
+ /* Branch back to insn after original insn. */
+ insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
+ contents[endianflip ^ (target + 4)] = insn & 0xff;
+ contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
+ contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
+ contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
+ }
+ break;
- default:
- abort ();
- }
- }
+ default:
+ abort ();
+ }
+ }
}
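
The erratum fix-ups above patch instruction words one byte at a time, XOR-ing
each byte index with endianflip (3 when the output is big-endian, 0
otherwise).  A standalone sketch of why that is equivalent to a single 32-bit
store in the output's byte order, assuming the patched offset is word-aligned
(illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static void put_bytes (uint8_t *contents, unsigned int endianflip,
                       unsigned int target, uint32_t insn)
{
  /* Same store pattern as the erratum fix-ups above.  */
  contents[endianflip ^ target]       = insn & 0xff;
  contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
  contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
  contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
}

int main (void)
{
  uint8_t buf[4];

  /* endianflip == 3 models a big-endian output; 0xe0810002 is
     "add r0, r1, r2".  */
  put_bytes (buf, 3, 0, 0xe0810002);
  printf ("%02x %02x %02x %02x\n",
          (unsigned) buf[0], (unsigned) buf[1],
          (unsigned) buf[2], (unsigned) buf[3]);
  /* Prints "e0 81 00 02": the word laid out in big-endian order, exactly
     what a single 32-bit big-endian store would have produced.  */
  return 0;
}
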
if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
{
arm_unwind_table_edit *edit_node
- = arm_data->u.exidx.unwind_edit_list;
+ = arm_data->u.exidx.unwind_edit_list;
/* Now, sec->size is the size of the section we will write. The original
- size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
+ size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
markers) was sec->rawsize. (This isn't the case if we perform no
edits, then rawsize will be zero and we should use size). */
bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
@@ -15324,13 +15357,13 @@ elf32_arm_write_section (bfd *output_bfd,
bfd_vma add_to_offsets = 0;
for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
- {
+ {
if (edit_node)
{
unsigned int edit_index = edit_node->index;
if (in_index < edit_index && in_index * 8 < input_size)
- {
+ {
copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
contents + in_index * 8, add_to_offsets);
out_index++;
@@ -15339,7 +15372,7 @@ elf32_arm_write_section (bfd *output_bfd,
else if (in_index == edit_index
|| (in_index * 8 >= input_size
&& edit_index == UINT_MAX))
- {
+ {
switch (edit_node->type)
{
case DELETE_EXIDX_ENTRY:
@@ -15349,12 +15382,12 @@ elf32_arm_write_section (bfd *output_bfd,
case INSERT_EXIDX_CANTUNWIND_AT_END:
{
- asection *text_sec = edit_node->linked_section;
+ asection *text_sec = edit_node->linked_section;
bfd_vma text_offset = text_sec->output_section->vma
+ text_sec->output_offset
+ text_sec->size;
bfd_vma exidx_offset = offset + out_index * 8;
- unsigned long prel31_offset;
+ unsigned long prel31_offset;
/* Note: this is meant to be equivalent to an
R_ARM_PREL31 relocation. These synthetic
@@ -15419,45 +15452,45 @@ elf32_arm_write_section (bfd *output_bfd,
ptr = map[0].vma;
for (i = 0; i < mapcount; i++)
- {
- if (i == mapcount - 1)
+ {
+ if (i == mapcount - 1)
end = sec->size;
- else
- end = map[i + 1].vma;
+ else
+ end = map[i + 1].vma;
- switch (map[i].type)
+ switch (map[i].type)
{
case 'a':
/* Byte swap code words. */
while (ptr + 3 < end)
- {
- tmp = contents[ptr];
- contents[ptr] = contents[ptr + 3];
- contents[ptr + 3] = tmp;
- tmp = contents[ptr + 1];
- contents[ptr + 1] = contents[ptr + 2];
- contents[ptr + 2] = tmp;
- ptr += 4;
- }
+ {
+ tmp = contents[ptr];
+ contents[ptr] = contents[ptr + 3];
+ contents[ptr + 3] = tmp;
+ tmp = contents[ptr + 1];
+ contents[ptr + 1] = contents[ptr + 2];
+ contents[ptr + 2] = tmp;
+ ptr += 4;
+ }
break;
case 't':
/* Byte swap code halfwords. */
while (ptr + 1 < end)
- {
- tmp = contents[ptr];
- contents[ptr] = contents[ptr + 1];
- contents[ptr + 1] = tmp;
- ptr += 2;
- }
+ {
+ tmp = contents[ptr];
+ contents[ptr] = contents[ptr + 1];
+ contents[ptr + 1] = tmp;
+ ptr += 2;
+ }
break;
case 'd':
/* Leave data alone. */
break;
}
- ptr = end;
- }
+ ptr = end;
+ }
}
free (map);
@@ -15526,17 +15559,17 @@ elf32_arm_swap_symbol_out (bfd *abfd,
if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
if (newsym.st_shndx != SHN_UNDEF)
- {
- /* Do this only for defined symbols. At link type, the static
- linker will simulate the work of dynamic linker of resolving
- symbols and will carry over the thumbness of found symbols to
- the output symbol table. It's not clear how it happens, but
- the thumbness of undefined symbols can well be different at
- runtime, and writing '1' for them will be confusing for users
- and possibly for dynamic linker itself.
- */
- newsym.st_value |= 1;
- }
+ {
+	  /* Do this only for defined symbols. At link time, the static
+	     linker will simulate the work of the dynamic linker in resolving
+ symbols and will carry over the thumbness of found symbols to
+ the output symbol table. It's not clear how it happens, but
+ the thumbness of undefined symbols can well be different at
+ runtime, and writing '1' for them will be confusing for users
+	     and possibly for the dynamic linker itself.
+ */
+ newsym.st_value |= 1;
+ }
src = &newsym;
}
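
The comment above leans on the ARM convention that bit 0 of a function
symbol's value marks a Thumb entry point.  A minimal, self-contained
illustration of that convention (hypothetical values, not code from this
file):

#include <stdio.h>

int main (void)
{
  unsigned int st_value = 0x8230;        /* hypothetical Thumb function */
  unsigned int tagged   = st_value | 1;  /* what the swap-out hook does */

  printf ("address 0x%x, %s entry\n",
          tagged & ~1u,                  /* strip the marker bit */
          (tagged & 1) ? "Thumb" : "ARM");
  return 0;
}
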
@@ -15564,7 +15597,7 @@ elf32_arm_modify_segment_map (bfd *abfd,
if (!m)
{
m = (struct elf_segment_map *)
- bfd_zalloc (abfd, sizeof (struct elf_segment_map));
+ bfd_zalloc (abfd, sizeof (struct elf_segment_map));
if (m == NULL)
return FALSE;
m->p_type = PT_ARM_EXIDX;
@@ -15901,8 +15934,8 @@ elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
flags then do not bother setting the flags for the output
architecture, instead allow future merges to do this. If no
future merges ever set these flags then they will retain their
- uninitialised values, which surprise surprise, correspond
- to the default values. */
+     uninitialised values, which, surprise surprise, correspond
+ to the default values. */
if (bfd_get_arch_info (ibfd)->the_default
&& elf_elfheader (ibfd)->e_flags == 0)
return TRUE;
@@ -15950,7 +15983,7 @@ elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
if ((bfd_get_section_flags (ibfd, sec)
& (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
== (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
- only_data_sections = FALSE;
+ only_data_sections = FALSE;
null_input_bfd = FALSE;
break;