Diffstat (limited to 'bfd/elf64-mmix.c')
 -rw-r--r--  bfd/elf64-mmix.c | 557
 1 file changed, 511 insertions(+), 46 deletions(-)
diff --git a/bfd/elf64-mmix.c b/bfd/elf64-mmix.c
index 54c4ca892b..5960e32580 100644
--- a/bfd/elf64-mmix.c
+++ b/bfd/elf64-mmix.c
@@ -21,7 +21,10 @@ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
/* No specific ABI or "processor-specific supplement" defined. */
/* TODO:
- - Linker relaxation. */
+ - "Traditional" linker relaxation (shrinking whole sections).
+ - Merge reloc stubs jumping to same location.
+ - GETA stub relaxation (call a stub for out of range new
+ R_MMIX_GETA_STUBBABLE). */
#include "bfd.h"
#include "sysdep.h"
@@ -32,6 +35,8 @@ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#define MINUS_ONE (((bfd_vma) 0) - 1)
+#define MAX_PUSHJ_STUB_SIZE (5 * 4)
+
/* Put these everywhere in new code. */
#define FATAL_DEBUG \
_bfd_abort (__FILE__, __LINE__, \
@@ -49,6 +54,24 @@ struct _mmix_elf_section_data
struct bpo_reloc_section_info *reloc;
struct bpo_greg_section_info *greg;
} bpo;
+
+ struct pushj_stub_info
+ {
+ /* Maximum number of stubs needed for this section. */
+ bfd_size_type n_pushj_relocs;
+
+ /* Size of stubs after a mmix_elf_relax_section round. */
+ bfd_size_type stubs_size_sum;
+
+ /* Per-reloc stubs_size_sum information. The stubs_size_sum member is the sum
+ of these. Allocated in mmix_elf_check_common_relocs. */
+ bfd_size_type *stub_size;
+
+ /* Offset of next stub during relocation. Somewhat redundant with the
+ above: error coverage is easier and we don't have to reset the
+ stubs_size_sum for relocation. */
+ bfd_size_type stub_offset;
+ } pjs;
};
#define mmix_elf_section_data(sec) \
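
To make the bookkeeping above concrete, here is a minimal standalone sketch (hypothetical type and helper names, not from the patch) of the invariant the pjs fields imply: stubs_size_sum is the sum of the per-reloc stub_size entries, each of which ends up as 0, 4, or MAX_PUSHJ_STUB_SIZE (five 4-byte insns) after relaxation.

  #include <assert.h>
  #include <stddef.h>

  #define MAX_PUSHJ_STUB_SIZE (5 * 4)

  struct pushj_stub_info_sketch
  {
    size_t n_pushj_relocs;    /* Number of stubbable PUSHJ relocs.  */
    size_t stubs_size_sum;    /* Sum of stub_size[] after a relax round.  */
    size_t *stub_size;        /* Per-reloc stub size: 0, 4 or 20 bytes.  */
    size_t stub_offset;       /* Offset of the next stub to emit.  */
  };

  /* Recompute the per-section sum and check it against stubs_size_sum.  */
  static void
  check_stub_sums (const struct pushj_stub_info_sketch *pjs)
  {
    size_t i, sum = 0;

    for (i = 0; i < pjs->n_pushj_relocs; i++)
      sum += pjs->stub_size[i];

    assert (sum == pjs->stubs_size_sum);
  }
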
@@ -119,9 +142,9 @@ struct bpo_greg_section_info
size_t n_remaining_bpo_relocs_this_relaxation_round;
/* The number of linker-allocated GREGs resulting from BPO relocs.
- This is an approximation after _bfd_mmix_allocated_gregs_init and
- supposedly accurate after mmix_elf_relax_section is called for all
- incoming non-collected sections. */
+ This is an approximation after _bfd_mmix_before_linker_allocation
+ and supposedly accurate after mmix_elf_relax_section is called for
+ all incoming non-collected sections. */
size_t n_allocated_bpo_gregs;
/* Index into reloc_request[], sorted on increasing "value", secondary
@@ -202,6 +225,15 @@ extern void mmix_elf_symbol_processing PARAMS ((bfd *, asymbol *));
extern void mmix_dump_bpo_gregs
PARAMS ((struct bfd_link_info *, bfd_error_handler_type));
+static void
+mmix_set_relaxable_size
+ PARAMS ((bfd *, asection *, void *));
+
+static bfd_boolean
+mmix_elf_get_section_contents
+ PARAMS ((bfd *, sec_ptr, void *, file_ptr, bfd_size_type));
+
+
/* Watch out: this currently needs to have elements with the same index as
their R_MMIX_ number. */
static reloc_howto_type elf_mmix_howto_table[] =
@@ -538,7 +570,8 @@ static reloc_howto_type elf_mmix_howto_table[] =
/* The PUSHJ instruction can reach any (code) address, as long as it's
the beginning of a function (no usable restriction). It can silently
expand to a 64-bit operand, but will emit an error if any of the two
- least significant bits are set. The howto members reflect a simple
+ least significant bits are set. It can also expand into a call to a
+ stub; see R_MMIX_PUSHJ_STUBBABLE. The howto members reflect a simple
PUSHJ. */
HOWTO (R_MMIX_PUSHJ, /* type */
2, /* rightshift */
@@ -754,6 +787,20 @@ static reloc_howto_type elf_mmix_howto_table[] =
0, /* src_mask */
0, /* dst_mask */
FALSE), /* pcrel_offset */
+
+ HOWTO (R_MMIX_PUSHJ_STUBBABLE, /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 19, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ mmix_elf_reloc, /* special_function */
+ "R_MMIX_PUSHJ_STUBBABLE", /* name */
+ FALSE, /* partial_inplace */
+ ~0x0100ffff, /* src_mask */
+ 0x0100ffff, /* dst_mask */
+ TRUE) /* pcrel_offset */
};
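
The howto above describes a 19-bit signed, PC-relative field whose low two bits must be zero, so a PUSHJ can be fixed up directly only when the byte displacement is 4-byte aligned and within ±2^18 bytes (256 KiB). A standalone sketch of that range test (illustrative only; the patch performs it with bfd_check_overflow, bitsize 19, rightshift 0):

  #include <stdint.h>
  #include <stdbool.h>

  /* True if a PUSHJ byte displacement fits the 19-bit signed field.
     The low two bits must be zero; the insn stores the displacement / 4.  */
  static bool
  pushj_displacement_fits (int64_t disp)
  {
    if ((disp & 3) != 0)
      return false;
    return disp >= -((int64_t) 1 << 18) && disp < ((int64_t) 1 << 18);
  }
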
@@ -790,7 +837,8 @@ static const struct mmix_reloc_map mmix_reloc_map[] =
{BFD_RELOC_MMIX_REG_OR_BYTE, R_MMIX_REG_OR_BYTE},
{BFD_RELOC_MMIX_REG, R_MMIX_REG},
{BFD_RELOC_MMIX_BASE_PLUS_OFFSET, R_MMIX_BASE_PLUS_OFFSET},
- {BFD_RELOC_MMIX_LOCAL, R_MMIX_LOCAL}
+ {BFD_RELOC_MMIX_LOCAL, R_MMIX_LOCAL},
+ {BFD_RELOC_MMIX_PUSHJ_STUBBABLE, R_MMIX_PUSHJ_STUBBABLE}
};
static reloc_howto_type *
@@ -880,7 +928,7 @@ mmix_elf_perform_relocation (isec, howto, datap, addr, value)
asection *isec;
reloc_howto_type *howto;
PTR datap;
- bfd_vma addr ATTRIBUTE_UNUSED;
+ bfd_vma addr;
bfd_vma value;
{
bfd *abfd = isec->owner;
@@ -931,6 +979,96 @@ mmix_elf_perform_relocation (isec, howto, datap, addr, value)
}
break;
+ case R_MMIX_PUSHJ_STUBBABLE:
+ /* If the address fits, we're fine. */
+ if ((value & 3) == 0
+ /* Note rightshift 0; see R_MMIX_JMP case below. */
+ && (r = bfd_check_overflow (complain_overflow_signed,
+ howto->bitsize,
+ 0,
+ bfd_arch_bits_per_address (abfd),
+ value)) == bfd_reloc_ok)
+ goto pcrel_mmix_reloc_fits;
+ else
+ {
+ bfd_size_type raw_size
+ = (isec->_raw_size
+ - mmix_elf_section_data (isec)->pjs.n_pushj_relocs
+ * MAX_PUSHJ_STUB_SIZE);
+
+ /* We have the bytes at the PUSHJ insn and need to get the
+ position for the stub. There's supposed to be room allocated
+ for the stub. */
+ bfd_byte *stubcontents
+ = ((char *) datap
+ - (addr - (isec->output_section->vma + isec->output_offset))
+ + raw_size
+ + mmix_elf_section_data (isec)->pjs.stub_offset);
+ bfd_vma stubaddr;
+
+ /* The address doesn't fit, so redirect the PUSHJ to the
+ location of the stub. */
+ r = mmix_elf_perform_relocation (isec,
+ &elf_mmix_howto_table
+ [R_MMIX_ADDR19],
+ datap,
+ addr,
+ isec->output_section->vma
+ + isec->output_offset
+ + raw_size
+ + (mmix_elf_section_data (isec)
+ ->pjs.stub_offset)
+ - addr);
+ if (r != bfd_reloc_ok)
+ return r;
+
+ stubaddr
+ = (isec->output_section->vma
+ + isec->output_offset
+ + raw_size
+ + mmix_elf_section_data (isec)->pjs.stub_offset);
+
+ /* We generate a simple JMP if that suffices, else the whole 5
+ insn stub. */
+ if (bfd_check_overflow (complain_overflow_signed,
+ elf_mmix_howto_table[R_MMIX_ADDR27].bitsize,
+ 0,
+ bfd_arch_bits_per_address (abfd),
+ addr + value - stubaddr) == bfd_reloc_ok)
+ {
+ bfd_put_32 (abfd, JMP_INSN_BYTE << 24, stubcontents);
+ r = mmix_elf_perform_relocation (isec,
+ &elf_mmix_howto_table
+ [R_MMIX_ADDR27],
+ stubcontents,
+ stubaddr,
+ value + addr - stubaddr);
+ mmix_elf_section_data (isec)->pjs.stub_offset += 4;
+
+ if (raw_size
+ + mmix_elf_section_data (isec)->pjs.stub_offset
+ > isec->_cooked_size)
+ abort ();
+
+ return r;
+ }
+ else
+ {
+ /* Put a "GO $255,0" after the common sequence. */
+ bfd_put_32 (abfd,
+ ((GO_INSN_BYTE | IMM_OFFSET_BIT) << 24)
+ | 0xff00, (bfd_byte *) stubcontents + 16);
+
+ /* Prepare for the general code to set the first part of the
+ linker stub, and account for the full stub size in the stub offset. */
+ value += addr;
+ datap = stubcontents;
+ mmix_elf_section_data (isec)->pjs.stub_offset
+ += MAX_PUSHJ_STUB_SIZE;
+ }
+ }
+ break;
+
case R_MMIX_PUSHJ:
{
int inreg = bfd_get_8 (abfd, (bfd_byte *) datap + 1);
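
The stub area for an out-of-range PUSHJ sits after the section's real contents: the patch first strips the reserved worst-case stub space from _raw_size, then adds the running stub_offset. A small worked sketch of that address arithmetic (all values hypothetical):

  #include <stdint.h>
  #include <stdio.h>

  #define MAX_PUSHJ_STUB_SIZE (5 * 4)

  int
  main (void)
  {
    uint64_t out_vma = 0x1000;          /* isec->output_section->vma */
    uint64_t out_off = 0x200;           /* isec->output_offset */
    uint64_t padded_raw_size = 0x400;   /* isec->_raw_size, incl. stub area */
    uint64_t n_pushj = 3;               /* pjs.n_pushj_relocs */
    uint64_t stub_offset = 24;          /* pjs.stub_offset used so far */

    /* The real contents end where the reserved stub area begins.  */
    uint64_t raw_size = padded_raw_size - n_pushj * MAX_PUSHJ_STUB_SIZE;

    /* Address the redirected PUSHJ (via an R_MMIX_ADDR19 fixup) jumps to.  */
    uint64_t stubaddr = out_vma + out_off + raw_size + stub_offset;

    printf ("stub lands at 0x%llx\n", (unsigned long long) stubaddr);
    return 0;
  }
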
@@ -978,6 +1116,7 @@ mmix_elf_perform_relocation (isec, howto, datap, addr, value)
/* FALLTHROUGH. */
case R_MMIX_ADDR19:
case R_MMIX_ADDR27:
+ pcrel_mmix_reloc_fits:
/* These must be in range, or else we emit an error. */
if ((value & 3) == 0
/* Note rightshift 0; see above. */
@@ -993,7 +1132,7 @@ mmix_elf_perform_relocation (isec, howto, datap, addr, value)
if ((bfd_signed_vma) value < 0)
{
- highbit = (1 << 24);
+ highbit = 1 << 24;
value += (1 << (howto->bitsize - 1));
}
else
@@ -1203,6 +1342,11 @@ mmix_elf_relocate_section (output_bfd, info, input_bfd, input_section,
struct elf_link_hash_entry **sym_hashes;
Elf_Internal_Rela *rel;
Elf_Internal_Rela *relend;
+ bfd_size_type raw_size
+ = (input_section->_raw_size
+ - mmix_elf_section_data (input_section)->pjs.n_pushj_relocs
+ * MAX_PUSHJ_STUB_SIZE);
+ size_t pjsno = 0;
symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (input_bfd);
@@ -1231,10 +1375,10 @@ mmix_elf_relocate_section (output_bfd, info, input_bfd, input_section,
if (info->relocatable)
{
- /* This is a relocatable link. We don't have to change
- anything, unless the reloc is against a section symbol,
- in which case we have to adjust according to where the
- section symbol winds up in the output section. */
+ /* This is a relocatable link. For most relocs we don't have to
+ change anything, unless the reloc is against a section
+ symbol, in which case we have to adjust according to where
+ the section symbol winds up in the output section. */
if (r_symndx < symtab_hdr->sh_info)
{
sym = local_syms + r_symndx;
@@ -1246,6 +1390,75 @@ mmix_elf_relocate_section (output_bfd, info, input_bfd, input_section,
}
}
+ /* For PUSHJ stub relocs however, we may need to change the
+ reloc and the section contents, if the reloc doesn't reach
+ beyond the end of the output section and previous stubs.
+ Then we change the section contents to be a PUSHJ to the end
+ of the input section plus stubs (we can do that without using
+ a reloc), and then we change the reloc to be a R_MMIX_PUSHJ
+ at the stub location. */
+ if (r_type == R_MMIX_PUSHJ_STUBBABLE)
+ {
+ /* We've already checked whether we need a stub; use that
+ knowledge. */
+ if (mmix_elf_section_data (input_section)->pjs.stub_size[pjsno]
+ != 0)
+ {
+ Elf_Internal_Rela relcpy;
+
+ if (mmix_elf_section_data (input_section)
+ ->pjs.stub_size[pjsno] != MAX_PUSHJ_STUB_SIZE)
+ abort ();
+
+ /* There's already a PUSHJ insn there, so just fill in
+ the offset bits to the stub. */
+ if (mmix_final_link_relocate (elf_mmix_howto_table
+ + R_MMIX_ADDR19,
+ input_section,
+ contents,
+ rel->r_offset,
+ 0,
+ input_section
+ ->output_section->vma
+ + input_section->output_offset
+ + raw_size
+ + mmix_elf_section_data (input_section)
+ ->pjs.stub_offset,
+ NULL, NULL) != bfd_reloc_ok)
+ return FALSE;
+
+ /* Put a JMP insn at the stub; it goes with the
+ R_MMIX_JMP reloc. */
+ bfd_put_32 (output_bfd, JMP_INSN_BYTE << 24,
+ contents
+ + raw_size
+ + mmix_elf_section_data (input_section)
+ ->pjs.stub_offset);
+
+ /* Change the reloc to be at the stub, and to a full
+ R_MMIX_JMP reloc. */
+ rel->r_info = ELF64_R_INFO (r_symndx, R_MMIX_JMP);
+ rel->r_offset
+ = (raw_size
+ + mmix_elf_section_data (input_section)
+ ->pjs.stub_offset);
+
+ mmix_elf_section_data (input_section)->pjs.stub_offset
+ += MAX_PUSHJ_STUB_SIZE;
+
+ /* Shift this reloc to the end of the relocs to maintain
+ the r_offset sorted reloc order. */
+ relcpy = *rel;
+ memmove (rel, rel + 1, (char *) relend - (char *) (rel + 1));
+ relend[-1] = relcpy;
+
+ /* Back up one reloc, or else we'd skip the next reloc
+ in turn. */
+ rel--;
+ }
+
+ pjsno++;
+ }
continue;
}
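
The rotation at the end of the relocatable-link branch keeps the reloc array sorted by r_offset after the rewritten reloc has been given a larger (stub) offset. The idiom, shown on a plain int array for illustration:

  #include <string.h>

  /* Move the element at POS to the end of the array ending at END,
     shifting the following elements down one slot; END points one past
     the last element, as relend does in the patch.  */
  static void
  rotate_to_end (int *pos, int *end)
  {
    int tmp = *pos;

    memmove (pos, pos + 1, (char *) end - (char *) (pos + 1));
    end[-1] = tmp;
  }
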
@@ -1383,6 +1596,7 @@ mmix_final_link_relocate (howto, input_section, contents,
switch (howto->type)
{
/* All these are PC-relative. */
+ case R_MMIX_PUSHJ_STUBBABLE:
case R_MMIX_PUSHJ:
case R_MMIX_CBRANCH:
case R_MMIX_ADDR19:
@@ -1669,9 +1883,6 @@ mmix_elf_check_common_relocs (abfd, info, sec, relocs)
const Elf_Internal_Rela *rel;
const Elf_Internal_Rela *rel_end;
- if (info->relocatable)
- return TRUE;
-
/* We currently have to abuse this COFF-specific member, since there's
no target-machine-dedicated member. There's no alternative outside
the bfd_link_info struct; we can't specialize a hash-table since
@@ -1689,6 +1900,10 @@ mmix_elf_check_common_relocs (abfd, info, sec, relocs)
the ELF dynobj for this, since the ELF bits assume lots of
DSO-related stuff if that member is non-NULL. */
case R_MMIX_BASE_PLUS_OFFSET:
+ /* We don't do anything with this reloc for a relocatable link. */
+ if (info->relocatable)
+ break;
+
if (bpo_greg_owner == NULL)
{
bpo_greg_owner = abfd;
@@ -1756,13 +1971,34 @@ mmix_elf_check_common_relocs (abfd, info, sec, relocs)
gregdata->n_max_bpo_relocs++;
/* We don't get another chance to set this before GC; we've not
- set up set up any hook that runs before GC. */
+ set up any hook that runs before GC. */
gregdata->n_bpo_relocs
= gregdata->n_max_bpo_relocs;
break;
+
+ case R_MMIX_PUSHJ_STUBBABLE:
+ mmix_elf_section_data (sec)->pjs.n_pushj_relocs++;
+ break;
}
}
+ /* Allocate per-reloc stub storage and initialize it to the max stub
+ size. */
+ if (mmix_elf_section_data (sec)->pjs.n_pushj_relocs != 0)
+ {
+ size_t i;
+
+ mmix_elf_section_data (sec)->pjs.stub_size
+ = bfd_alloc (abfd, mmix_elf_section_data (sec)->pjs.n_pushj_relocs
+ * sizeof (mmix_elf_section_data (sec)
+ ->pjs.stub_size[0]));
+ if (mmix_elf_section_data (sec)->pjs.stub_size == NULL)
+ return FALSE;
+
+ for (i = 0; i < mmix_elf_section_data (sec)->pjs.n_pushj_relocs; i++)
+ mmix_elf_section_data (sec)->pjs.stub_size[i] = MAX_PUSHJ_STUB_SIZE;
+ }
+
return TRUE;
}
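
The per-reloc array starts out pessimistic: every stubbable PUSHJ is assumed to need the full stub until relaxation proves otherwise. A minimal sketch of that initialization (plain malloc here; the patch uses bfd_alloc on the owning bfd):

  #include <stdlib.h>

  #define MAX_PUSHJ_STUB_SIZE (5 * 4)

  /* Allocate one stub-size slot per stubbable PUSHJ reloc and assume the
     worst case for each until relaxation shrinks it.  */
  static size_t *
  alloc_stub_sizes (size_t n_pushj_relocs)
  {
    size_t i;
    size_t *stub_size = malloc (n_pushj_relocs * sizeof *stub_size);

    if (stub_size == NULL)
      return NULL;

    for (i = 0; i < n_pushj_relocs; i++)
      stub_size[i] = MAX_PUSHJ_STUB_SIZE;

    return stub_size;
  }
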
@@ -1780,9 +2016,6 @@ mmix_elf_check_relocs (abfd, info, sec, relocs)
const Elf_Internal_Rela *rel;
const Elf_Internal_Rela *rel_end;
- if (info->relocatable)
- return TRUE;
-
symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (abfd);
sym_hashes_end = sym_hashes + symtab_hdr->sh_size/sizeof(Elf64_External_Sym);
@@ -1798,6 +2031,9 @@ mmix_elf_check_relocs (abfd, info, sec, relocs)
if (!mmix_elf_check_common_relocs (abfd, info, sec, relocs))
return FALSE;
+ if (info->relocatable)
+ return TRUE;
+
rel_end = relocs + sec->reloc_count;
for (rel = relocs; rel < rel_end; rel++)
{
@@ -1904,8 +2140,7 @@ static asection mmix_elf_reg_section;
static asymbol mmix_elf_reg_section_symbol;
static asymbol *mmix_elf_reg_section_symbol_ptr;
-/* Handle the special MIPS section numbers that a symbol may use.
- This is used for both the 32-bit and the 64-bit ABI. */
+/* Handle the special section numbers that a symbol may use. */
void
mmix_elf_symbol_processing (abfd, asym)
@@ -2082,11 +2317,49 @@ mmix_elf_final_link (abfd, info)
return TRUE;
}
+/* We need to include the maximum size of PUSHJ-stubs in the initial
+ section size. This is expected to shrink during linker relaxation.
+
+ You might think that we should set *only* _cooked_size, but that won't
+ work: section contents allocation will be using _raw_size in mixed
+ format linking and not enough storage will be allocated. FIXME: That's
+ a major bug, including the name bfd_get_section_size_before_reloc; it
+ should be bfd_get_section_size_before_relax. The relaxation functions
+ set _cooked_size. Relaxation happens before relocation. All functions
+ *after relaxation* should be using _cooked_size. */
+
+static void
+mmix_set_relaxable_size (abfd, sec, ptr)
+ bfd *abfd ATTRIBUTE_UNUSED;
+ asection *sec;
+ void *ptr;
+{
+ struct bfd_link_info *info = ptr;
+
+ /* Make sure we only do this for sections where we know we want this,
+ otherwise we might end up resetting the size of COMMONs. */
+ if (mmix_elf_section_data (sec)->pjs.n_pushj_relocs == 0)
+ return;
+
+ sec->_cooked_size
+ = (sec->_raw_size
+ + mmix_elf_section_data (sec)->pjs.n_pushj_relocs
+ * MAX_PUSHJ_STUB_SIZE);
+ sec->_raw_size = sec->_cooked_size;
+
+ /* For use in a relocatable link, we start with a max stubs size. See
+ mmix_elf_relax_section. */
+ if (info->relocatable && sec->output_section)
+ mmix_elf_section_data (sec->output_section)->pjs.stubs_size_sum
+ += (mmix_elf_section_data (sec)->pjs.n_pushj_relocs
+ * MAX_PUSHJ_STUB_SIZE);
+}
+
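
In other words, a section whose real input contents occupy S bytes and which carries N stubbable PUSHJ relocs enters the link sized S + N * MAX_PUSHJ_STUB_SIZE, and each relaxation round can only shrink the stub area. A minimal sketch of that arithmetic (hypothetical helper names):

  #include <stdint.h>

  #define MAX_PUSHJ_STUB_SIZE (5 * 4)

  /* Initial (padded) size: real contents plus the worst-case stub area.  */
  static uint64_t
  initial_relaxable_size (uint64_t input_size, uint64_t n_pushj_relocs)
  {
    return input_size + n_pushj_relocs * MAX_PUSHJ_STUB_SIZE;
  }

  /* Size after a relaxation round: real contents plus the stubs still
     needed; stubs_size_sum never exceeds n_pushj_relocs * 20.  */
  static uint64_t
  relaxed_size (uint64_t input_size, uint64_t stubs_size_sum)
  {
    return input_size + stubs_size_sum;
  }
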
/* Initialize stuff for the linker-generated GREGs to match
R_MMIX_BASE_PLUS_OFFSET relocs seen by the linker. */
bfd_boolean
-_bfd_mmix_prepare_linker_allocated_gregs (abfd, info)
+_bfd_mmix_before_linker_allocation (abfd, info)
bfd *abfd ATTRIBUTE_UNUSED;
struct bfd_link_info *info;
{
@@ -2097,6 +2370,11 @@ _bfd_mmix_prepare_linker_allocated_gregs (abfd, info)
bfd_vma gregs_size;
size_t i;
size_t *bpo_reloc_indexes;
+ bfd *ibfd;
+
+ /* Set the initial size of sections. */
+ for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
+ bfd_map_over_sections (ibfd, mmix_set_relaxable_size, info);
/* The bpo_greg_owner bfd is supposed to have been set by
mmix_elf_check_relocs when the first R_MMIX_BASE_PLUS_OFFSET is seen.
@@ -2160,7 +2438,7 @@ _bfd_mmix_prepare_linker_allocated_gregs (abfd, info)
calculated at this point; we just move the contents into place here. */
bfd_boolean
-_bfd_mmix_finalize_linker_allocated_gregs (abfd, link_info)
+_bfd_mmix_after_linker_allocation (abfd, link_info)
bfd *abfd ATTRIBUTE_UNUSED;
struct bfd_link_info *link_info;
{
@@ -2324,6 +2602,8 @@ mmix_dump_bpo_gregs (link_info, pf)
from the first allocated register number) and offsets for use in real
relocation.
+ PUSHJ stub accounting is also done here.
+
Symbol- and reloc-reading infrastructure copied from elf-m10200.c. */
static bfd_boolean
@@ -2340,37 +2620,51 @@ mmix_elf_relax_section (abfd, sec, link_info, again)
struct bpo_greg_section_info *gregdata;
struct bpo_reloc_section_info *bpodata
= mmix_elf_section_data (sec)->bpo.reloc;
- size_t bpono;
+ /* The initialization is to quiet compiler warnings. The value is to
+ spot a missing actual initialization. */
+ size_t bpono = (size_t) -1;
+ size_t pjsno = 0;
bfd *bpo_greg_owner;
Elf_Internal_Sym *isymbuf = NULL;
+ bfd_size_type raw_size
+ = (sec->_raw_size
+ - mmix_elf_section_data (sec)->pjs.n_pushj_relocs
+ * MAX_PUSHJ_STUB_SIZE);
+
+ mmix_elf_section_data (sec)->pjs.stubs_size_sum = 0;
/* Assume nothing changes. */
*again = FALSE;
/* If this is the first time we have been called for this section,
initialize the cooked size. */
- if (sec->_cooked_size == 0)
- sec->_cooked_size = sec->_raw_size;
-
- /* We don't have to do anything for a relocatable link, if
- this section does not have relocs, or if this is not a
- code section. */
- if (link_info->relocatable
- || (sec->flags & SEC_RELOC) == 0
+ if (sec->_cooked_size == 0 && sec->_raw_size != 0)
+ abort ();
+
+ /* We don't have to do anything if this section does not have relocs, or
+ if this is not a code section. */
+ if ((sec->flags & SEC_RELOC) == 0
|| sec->reloc_count == 0
|| (sec->flags & SEC_CODE) == 0
|| (sec->flags & SEC_LINKER_CREATED) != 0
- /* If no R_MMIX_BASE_PLUS_OFFSET relocs, then nothing to do. */
- || bpodata == NULL)
+ /* If no R_MMIX_BASE_PLUS_OFFSET relocs and no PUSHJ-stub relocs,
+ then nothing to do. */
+ || (bpodata == NULL
+ && mmix_elf_section_data (sec)->pjs.n_pushj_relocs == 0))
return TRUE;
symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
bpo_greg_owner = (bfd *) link_info->base_file;
- bpo_gregs_section = bpodata->bpo_greg_section;
- gregdata = mmix_elf_section_data (bpo_gregs_section)->bpo.greg;
- bpono = bpodata->first_base_plus_offset_reloc;
+ if (bpodata != NULL)
+ {
+ bpo_gregs_section = bpodata->bpo_greg_section;
+ gregdata = mmix_elf_section_data (bpo_gregs_section)->bpo.greg;
+ bpono = bpodata->first_base_plus_offset_reloc;
+ }
+ else
+ gregdata = NULL;
/* Get a copy of the native relocations. */
internal_relocs
@@ -2385,10 +2679,61 @@ mmix_elf_relax_section (abfd, sec, link_info, again)
for (irel = internal_relocs; irel < irelend; irel++)
{
bfd_vma symval;
+ struct elf_link_hash_entry *h = NULL;
- if (ELF64_R_TYPE (irel->r_info) != (int) R_MMIX_BASE_PLUS_OFFSET)
+ /* We only process two relocs. */
+ if (ELF64_R_TYPE (irel->r_info) != (int) R_MMIX_BASE_PLUS_OFFSET
+ && ELF64_R_TYPE (irel->r_info) != (int) R_MMIX_PUSHJ_STUBBABLE)
continue;
+ /* We process relocs in a distinctly different way when this is a
+ relocatable link (for one, we don't look at symbols), so we avoid
+ mixing its code with that for the "normal" relaxation. */
+ if (link_info->relocatable)
+ {
+ /* The only transformation in a relocatable link is to generate
+ a full stub at the location of the stub calculated for the
+ input section, if the relocated stub location, the end of the
+ output section plus earlier stubs, cannot be reached. Thus
+ relocatable linking can only lead to worse code, but it still
+ works. */
+ if (ELF64_R_TYPE (irel->r_info) == R_MMIX_PUSHJ_STUBBABLE)
+ {
+ /* If we can reach the end of the output-section and beyond
+ any current stubs, then we don't need a stub for this
+ reloc. The relaxed order of output stub allocation may
+ not exactly match the straightforward order, so we always
+ assume presence of output stubs, which will allow
+ relaxation only on relocations indifferent to the
+ presence of output stub allocations for other relocations
+ and thus the order of output stub allocation. */
+ if (bfd_check_overflow (complain_overflow_signed,
+ 19,
+ 0,
+ bfd_arch_bits_per_address (abfd),
+ /* Output-stub location. */
+ sec->output_section->_cooked_size
+ + (mmix_elf_section_data (sec
+ ->output_section)
+ ->pjs.stubs_size_sum)
+ /* Location of this PUSHJ reloc. */
+ - (sec->output_offset + irel->r_offset)
+ /* Don't count *this* stub twice. */
+ - (mmix_elf_section_data (sec)
+ ->pjs.stub_size[pjsno]
+ + MAX_PUSHJ_STUB_SIZE))
+ == bfd_reloc_ok)
+ mmix_elf_section_data (sec)->pjs.stub_size[pjsno] = 0;
+
+ mmix_elf_section_data (sec)->pjs.stubs_size_sum
+ += mmix_elf_section_data (sec)->pjs.stub_size[pjsno];
+
+ pjsno++;
+ }
+
+ continue;
+ }
+
/* Get the value of the symbol referred to by the reloc. */
if (ELF64_R_SYM (irel->r_info) < symtab_hdr->sh_info)
{
@@ -2424,7 +2769,6 @@ mmix_elf_relax_section (abfd, sec, link_info, again)
else
{
unsigned long indx;
- struct elf_link_hash_entry *h;
/* An external symbol. */
indx = ELF64_R_SYM (irel->r_info) - symtab_hdr->sh_info;
@@ -2433,12 +2777,16 @@ mmix_elf_relax_section (abfd, sec, link_info, again)
if (h->root.type != bfd_link_hash_defined
&& h->root.type != bfd_link_hash_defweak)
{
- /* This appears to be a reference to an undefined symbol.
- Just ignore it--it will be caught by the regular reloc
- processing. We need to keep BPO reloc accounting
- consistent, though. */
- gregdata->n_remaining_bpo_relocs_this_relaxation_round--;
- bpono++;
+ /* This appears to be a reference to an undefined symbol. Just
+ ignore it--it will be caught by the regular reloc processing.
+ We need to keep BPO reloc accounting consistent, though, or
+ else we'll abort instead of emitting an error message. */
+ if (ELF64_R_TYPE (irel->r_info) == R_MMIX_BASE_PLUS_OFFSET
+ && gregdata != NULL)
+ {
+ gregdata->n_remaining_bpo_relocs_this_relaxation_round--;
+ bpono++;
+ }
continue;
}
@@ -2447,6 +2795,62 @@ mmix_elf_relax_section (abfd, sec, link_info, again)
+ h->root.u.def.section->output_offset);
}
+ if (ELF64_R_TYPE (irel->r_info) == (int) R_MMIX_PUSHJ_STUBBABLE)
+ {
+ bfd_vma value = symval + irel->r_addend;
+ bfd_vma dot
+ = (sec->output_section->vma
+ + sec->output_offset
+ + irel->r_offset);
+ bfd_vma stubaddr
+ = (sec->output_section->vma
+ + sec->output_offset
+ + raw_size
+ + mmix_elf_section_data (sec)->pjs.stubs_size_sum);
+
+ if ((value & 3) == 0
+ && bfd_check_overflow (complain_overflow_signed,
+ 19,
+ 0,
+ bfd_arch_bits_per_address (abfd),
+ value - dot
+ - (value > dot
+ ? mmix_elf_section_data (sec)
+ ->pjs.stub_size[pjsno]
+ : 0))
+ == bfd_reloc_ok)
+ /* If the reloc fits, no stub is needed. */
+ mmix_elf_section_data (sec)->pjs.stub_size[pjsno] = 0;
+ else
+ /* Maybe we can get away with just a JMP insn? */
+ if ((value & 3) == 0
+ && bfd_check_overflow (complain_overflow_signed,
+ 27,
+ 0,
+ bfd_arch_bits_per_address (abfd),
+ value - stubaddr
+ - (value > dot
+ ? mmix_elf_section_data (sec)
+ ->pjs.stub_size[pjsno] - 4
+ : 0))
+ == bfd_reloc_ok)
+ /* Yep, account for a stub consisting of a single JMP insn. */
+ mmix_elf_section_data (sec)->pjs.stub_size[pjsno] = 4;
+ else
+ /* Nope, go for the full insn stub. It doesn't seem useful to
+ emit the intermediate sizes; those will only be useful for
+ a >64M program assuming contiguous code. */
+ mmix_elf_section_data (sec)->pjs.stub_size[pjsno]
+ = MAX_PUSHJ_STUB_SIZE;
+
+ mmix_elf_section_data (sec)->pjs.stubs_size_sum
+ += mmix_elf_section_data (sec)->pjs.stub_size[pjsno];
+ pjsno++;
+ continue;
+ }
+
+ /* We're looking at a R_MMIX_BASE_PLUS_OFFSET reloc. */
+
gregdata->reloc_request[gregdata->bpo_reloc_indexes[bpono]].value
= symval + irel->r_addend;
gregdata->reloc_request[gregdata->bpo_reloc_indexes[bpono++]].valid = TRUE;
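
The stub-size choice above boils down to three cases per stubbable PUSHJ: no stub when the 19-bit PC-relative field reaches the target directly, a single 4-byte JMP stub when a 27-bit field reaches it from the stub location, and the full MAX_PUSHJ_STUB_SIZE stub otherwise. A standalone sketch of that decision, leaving out the patch's adjustment for the reloc's own current stub size:

  #include <stdint.h>

  #define MAX_PUSHJ_STUB_SIZE (5 * 4)

  static int
  fits_signed (int64_t value, unsigned bits)
  {
    return value >= -((int64_t) 1 << (bits - 1))
           && value < ((int64_t) 1 << (bits - 1));
  }

  /* Pick a stub size for one PUSHJ: TARGET is the destination address,
     DOT the address of the PUSHJ, STUBADDR where its stub would go.  */
  static uint64_t
  pick_stub_size (uint64_t target, uint64_t dot, uint64_t stubaddr)
  {
    if ((target & 3) != 0)
      return MAX_PUSHJ_STUB_SIZE;   /* Misaligned target: be pessimistic.  */
    if (fits_signed ((int64_t) (target - dot), 19))
      return 0;                     /* The PUSHJ reaches directly.  */
    if (fits_signed ((int64_t) (target - stubaddr), 27))
      return 4;                     /* A single JMP stub suffices.  */
    return MAX_PUSHJ_STUB_SIZE;     /* Full absolute-address stub.  */
  }
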
@@ -2457,7 +2861,8 @@ mmix_elf_relax_section (abfd, sec, link_info, again)
calculate how many registers we need to cover them. Set the size of
the linker gregs, and if the number of registers changed, indicate
that we need to relax some more because we have more work to do. */
- if (gregdata->n_remaining_bpo_relocs_this_relaxation_round == 0)
+ if (gregdata != NULL
+ && gregdata->n_remaining_bpo_relocs_this_relaxation_round == 0)
{
size_t i;
bfd_vma prev_base;
@@ -2529,6 +2934,18 @@ mmix_elf_relax_section (abfd, sec, link_info, again)
&& elf_section_data (sec)->relocs != internal_relocs)
free (internal_relocs);
+ if (sec->_cooked_size
+ < raw_size + mmix_elf_section_data (sec)->pjs.stubs_size_sum)
+ abort ();
+
+ if (sec->_cooked_size
+ > raw_size + mmix_elf_section_data (sec)->pjs.stubs_size_sum)
+ {
+ sec->_cooked_size
+ = raw_size + mmix_elf_section_data (sec)->pjs.stubs_size_sum;
+ *again = TRUE;
+ }
+
return TRUE;
error_return:
@@ -2539,6 +2956,53 @@ mmix_elf_relax_section (abfd, sec, link_info, again)
free (internal_relocs);
return FALSE;
}
+
+/* Because we set _raw_size to include the max size of pushj stubs,
+ i.e. larger than the actual section input size (see
+ mmix_set_relaxable_size), we have to take care of that when reading
+ the section. */
+
+static bfd_boolean
+mmix_elf_get_section_contents (abfd, section, location, offset, count)
+ bfd *abfd;
+ sec_ptr section;
+ void *location;
+ file_ptr offset;
+ bfd_size_type count;
+{
+ bfd_size_type raw_size
+ = (section->_raw_size
+ - mmix_elf_section_data (section)->pjs.n_pushj_relocs
+ * MAX_PUSHJ_STUB_SIZE);
+
+ if (offset + count > section->_raw_size)
+ {
+ abort();
+ bfd_set_error (bfd_error_invalid_operation);
+ return FALSE;
+ }
+
+ /* Check bounds against the faked raw_size. */
+ if (offset + count > raw_size)
+ {
+ /* Clear the part in the faked area. */
+ memset (location + raw_size - offset, 0, count - (raw_size - offset));
+
+ /* If there's no initial part within the "real" contents, we're
+ done. */
+ if ((bfd_size_type) offset >= raw_size)
+ return TRUE;
+
+ /* Else adjust the count and fall through to call the generic
+ function. */
+ count = raw_size - offset;
+ }
+
+ return
+ _bfd_generic_get_section_contents (abfd, section, location, offset,
+ count);
+}
+
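
Put differently, a read that extends past the real contents must zero-fill the part that lies in the reserved stub area and fetch only the initial part from the file. A small sketch of just that clamping, on a plain buffer rather than a BFD section:

  #include <string.h>
  #include <stdint.h>

  /* Copy COUNT bytes starting at OFFSET from a buffer holding REAL_SIZE
     valid bytes (padded out to PADDED_SIZE), zero-filling anything beyond
     REAL_SIZE.  Returns 0 for requests outside even the padded bounds.  */
  static int
  read_padded (char *dst, const char *src, uint64_t real_size,
               uint64_t padded_size, uint64_t offset, uint64_t count)
  {
    if (offset + count > padded_size)
      return 0;

    if (offset >= real_size)
      {
        /* The whole request lies in the reserved stub area.  */
        memset (dst, 0, count);
        return 1;
      }

    if (offset + count > real_size)
      {
        /* Zero the tail that lies in the stub area, then read the rest.  */
        memset (dst + (real_size - offset), 0, count - (real_size - offset));
        count = real_size - offset;
      }

    memcpy (dst, src + offset, count);
    return 1;
  }
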
#define ELF_ARCH bfd_arch_mmix
#define ELF_MACHINE_CODE EM_MMIX
@@ -2587,5 +3051,6 @@ mmix_elf_relax_section (abfd, sec, link_info, again)
#define bfd_elf64_new_section_hook mmix_elf_new_section_hook
#define bfd_elf64_bfd_final_link mmix_elf_final_link
#define bfd_elf64_bfd_relax_section mmix_elf_relax_section
+#define bfd_elf64_get_section_contents mmix_elf_get_section_contents
#include "elf64-target.h"