* elf64-ppc.c (struct ppc_link_hash_table): Reinstate top_index.

Restore previous input_list type.
	(ppc64_elf_link_hash_table_create): Undo last change.
	(ppc64_elf_setup_section_lists): Reinstate code setting up input lists
	per output section, but don't bother with bfd_abs_section marker.
	(ppc64_elf_next_input_section): Adjust for multiple input section
	lists.
	(group_sections): Likewise.
This commit is contained in:
Alan Modra 2003-06-17 13:43:02 +00:00
parent 23fbd6fa70
commit 734b6cf9ae
2 changed files with 98 additions and 63 deletions

View File

@@ -1,3 +1,14 @@
2003-06-17 Alan Modra <amodra@bigpond.net.au>
* elf64-ppc.c (struct ppc_link_hash_table): Reinstate top_index.
Restore previous input_list type.
(ppc64_elf_link_hash_table_create): Undo last change.
(ppc64_elf_setup_section_lists): Reinstate code setting up input lists
per output section, but don't bother with bfd_abs_section marker.
(ppc64_elf_next_input_section): Adjust for multiple input section
lists.
(group_sections): Likewise.
2003-06-17 Jakub Jelinek <jakub@redhat.com>
* elf64-ppc.c (ppc64_elf_relocate_section): Ensure

View File

@@ -2773,8 +2773,11 @@ struct ppc_link_hash_table
/* Temp used when calculating TOC pointers. */
bfd_vma toc_curr;
/* List of input code sections used by ppc64_elf_size_stubs. */
asection *input_list;
/* Highest output section index. */
int top_index;
/* List of input sections for each output section. */
asection **input_list;
/* Short-cuts to get to dynamic linker sections. */
asection *sgot;
@@ -3053,7 +3056,6 @@ ppc64_elf_link_hash_table_create (abfd)
htab->no_multi_toc = 0;
htab->multi_toc_needed = 0;
htab->toc_curr = 0;
htab->input_list = NULL;
htab->sgot = NULL;
htab->srelgot = NULL;
htab->splt = NULL;
@@ -6500,8 +6502,9 @@ ppc64_elf_setup_section_lists (output_bfd, info)
struct bfd_link_info *info;
{
bfd *input_bfd;
int top_id, id;
int top_id, top_index, id;
asection *section;
asection **input_list;
bfd_size_type amt;
struct ppc_link_hash_table *htab = ppc_hash_table (info);
@@ -6532,6 +6535,25 @@ ppc64_elf_setup_section_lists (output_bfd, info)
htab->stub_group[id].toc_off = TOC_BASE_OFF;
elf_gp (output_bfd) = htab->toc_curr = ppc64_elf_toc (output_bfd);
/* We can't use output_bfd->section_count here to find the top output
section index as some sections may have been removed, and
_bfd_strip_section_from_output doesn't renumber the indices. */
for (section = output_bfd->sections, top_index = 0;
section != NULL;
section = section->next)
{
if (top_index < section->index)
top_index = section->index;
}
htab->top_index = top_index;
amt = sizeof (asection *) * (top_index + 1);
input_list = (asection **) bfd_zmalloc (amt);
htab->input_list = input_list;
if (input_list == NULL)
return -1;
return 1;
}
@@ -6588,14 +6610,16 @@ ppc64_elf_next_input_section (info, isec)
{
struct ppc_link_hash_table *htab = ppc_hash_table (info);
if ((isec->output_section->flags & SEC_CODE) != 0)
if ((isec->output_section->flags & SEC_CODE) != 0
&& isec->output_section->index <= htab->top_index)
{
asection **list = htab->input_list + isec->output_section->index;
/* Steal the link_sec pointer for our list. */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
/* This happens to make the list in reverse order,
which is what we want. */
PREV_SEC (isec) = htab->input_list;
htab->input_list = isec;
PREV_SEC (isec) = *list;
*list = isec;
}
/* If a code section has a function that uses the TOC then we need
@@ -6624,73 +6648,73 @@ group_sections (htab, stub_group_size, stubs_always_before_branch)
bfd_size_type stub_group_size;
bfd_boolean stubs_always_before_branch;
{
asection *tail = htab->input_list;
while (tail != NULL)
asection **list = htab->input_list + htab->top_index;
do
{
asection *curr;
asection *prev;
bfd_size_type total;
bfd_boolean big_sec;
bfd_vma curr_toc;
curr = tail;
if (tail->_cooked_size)
total = tail->_cooked_size;
else
total = tail->_raw_size;
big_sec = total >= stub_group_size;
curr_toc = htab->stub_group[tail->id].toc_off;
while ((prev = PREV_SEC (curr)) != NULL
&& ((total += (curr->output_section->vma
+ curr->output_offset
- prev->output_section->vma
- prev->output_offset))
< stub_group_size)
&& htab->stub_group[prev->id].toc_off == curr_toc)
curr = prev;
/* OK, the size from the start of CURR to the end is less
than stub_group_size and thus can be handled by one stub
section. (or the tail section is itself larger than
stub_group_size, in which case we may be toast.) We
should really be keeping track of the total size of stubs
added here, as stubs contribute to the final output
section size. That's a little tricky, and this way will
only break if stubs added make the total size more than
2^25, ie. for the default stub_group_size, if stubs total
more than 2097152 bytes, or nearly 75000 plt call stubs. */
do
asection *tail = *list;
while (tail != NULL)
{
prev = PREV_SEC (tail);
/* Set up this stub group. */
htab->stub_group[tail->id].link_sec = curr;
}
while (tail != curr && (tail = prev) != NULL);
asection *curr;
asection *prev;
bfd_size_type total;
bfd_boolean big_sec;
bfd_vma curr_toc;
/* But wait, there's more! Input sections up to stub_group_size
bytes before the stub section can be handled by it too.
Don't do this if we have a really large section after the
stubs, as adding more stubs increases the chance that
branches may not reach into the stub section. */
if (!stubs_always_before_branch && !big_sec)
{
total = 0;
while (prev != NULL
&& ((total += (tail->output_section->vma
+ tail->output_offset
- prev->output_section->vma
- prev->output_offset))
curr = tail;
if (tail->_cooked_size)
total = tail->_cooked_size;
else
total = tail->_raw_size;
big_sec = total >= stub_group_size;
curr_toc = htab->stub_group[tail->id].toc_off;
while ((prev = PREV_SEC (curr)) != NULL
&& ((total += curr->output_offset - prev->output_offset)
< stub_group_size)
&& htab->stub_group[prev->id].toc_off == curr_toc)
curr = prev;
/* OK, the size from the start of CURR to the end is less
than stub_group_size and thus can be handled by one stub
section. (or the tail section is itself larger than
stub_group_size, in which case we may be toast.) We
should really be keeping track of the total size of stubs
added here, as stubs contribute to the final output
section size. That's a little tricky, and this way will
only break if stubs added make the total size more than
2^25, ie. for the default stub_group_size, if stubs total
more than 2097152 bytes, or nearly 75000 plt call stubs. */
do
{
tail = prev;
prev = PREV_SEC (tail);
/* Set up this stub group. */
htab->stub_group[tail->id].link_sec = curr;
}
while (tail != curr && (tail = prev) != NULL);
/* But wait, there's more! Input sections up to stub_group_size
bytes before the stub section can be handled by it too.
Don't do this if we have a really large section after the
stubs, as adding more stubs increases the chance that
branches may not reach into the stub section. */
if (!stubs_always_before_branch && !big_sec)
{
total = 0;
while (prev != NULL
&& ((total += tail->output_offset - prev->output_offset)
< stub_group_size)
&& htab->stub_group[prev->id].toc_off == curr_toc)
{
tail = prev;
prev = PREV_SEC (tail);
htab->stub_group[tail->id].link_sec = curr;
}
}
tail = prev;
}
tail = prev;
}
while (list-- != htab->input_list);
free (htab->input_list);
#undef PREV_SEC
}