From 0e5fabeb2c4b90857403995e14550210fe1cae71 Mon Sep 17 00:00:00 2001
From: Alan Modra
Date: Wed, 22 Apr 2015 22:46:19 +0930
Subject: [PATCH] Rewrite relro adjusting code

The linker tries to put the end of the last section in the relro
segment exactly on a page boundary, because the relro segment itself
must end on a page boundary.  If for any reason this can't be done,
padding is inserted.  Since the end of the relro segment is typically
between .got and .got.plt, padding effectively increases the size of
the GOT.  This isn't nice for targets and code models with limited
GOT addressing.

The problem with the current code is that it doesn't cope very well
with aligned sections in the relro segment.  When making .got aligned
to a 256-byte boundary for PowerPC64, I found that the initial
alignment attempt often failed and that the fallback attempt was less
than adequate.  This is a particular problem for PowerPC64, since the
distance between .got and .plt affects the size of plt call stubs,
leading to "stubs don't match calculated size" errors.

So this rewrite takes a direct approach to calculating a new relro
base.  Starting from the last section in the segment, we calculate
where it must start to position its end on the boundary, or as near
as possible considering alignment requirements.  The new start then
becomes the goal for the end of the previous section, and so on for
all sections.  This of course ignores the possibility that user
scripts will place ". = ALIGN(xxx);" in the relro segment, or provide
section address expressions.  In those cases we might fail, but the
old code probably did too, and a fallback is provided.

ld/
        * ldexp.h (struct ldexp_control): Delete dataseg.min_base.  Add
        data_seg.relro_offset.
        * ldexp.c (fold_binary <DATA_SEGMENT_ALIGN>): Don't set min_base.
        (fold_binary <DATA_SEGMENT_RELRO_END>): Do set relro_offset.
        * ldlang.c (lang_size_sections): Rewrite code adjusting relro
        segment base to line up last section on page boundary.

ld/testsuite/
        * ld-x86-64/pr18176.d: Update.
---
 ld/ChangeLog                     |  9 ++++
 ld/ldexp.c                       |  2 +-
 ld/ldexp.h                       |  2 +-
 ld/ldlang.c                      | 73 +++++++++++++++-----------------
 ld/testsuite/ChangeLog           |  4 ++
 ld/testsuite/ld-x86-64/pr18176.d |  2 +-
 6 files changed, 50 insertions(+), 42 deletions(-)

diff --git a/ld/ChangeLog b/ld/ChangeLog
index b9b75fb03d..104963db2d 100644
--- a/ld/ChangeLog
+++ b/ld/ChangeLog
@@ -1,3 +1,12 @@
+2015-04-22  Alan Modra
+
+        * ldexp.h (struct ldexp_control): Delete dataseg.min_base.  Add
+        data_seg.relro_offset.
+        * ldexp.c (fold_binary <DATA_SEGMENT_ALIGN>): Don't set min_base.
+        (fold_binary <DATA_SEGMENT_RELRO_END>): Do set relro_offset.
+        * ldlang.c (lang_size_sections): Rewrite code adjusting relro
+        segment base to line up last section on page boundary.
+
 2015-04-15  H.J. Lu
 
         * NEWS: Mention
diff --git a/ld/ldexp.c b/ld/ldexp.c
index 9cd9e29d81..a5192b1eaa 100644
--- a/ld/ldexp.c
+++ b/ld/ldexp.c
@@ -575,7 +575,6 @@ fold_binary (etree_type *tree)
       else if (expld.dataseg.phase == exp_dataseg_none)
         {
           expld.dataseg.phase = exp_dataseg_align_seen;
-          expld.dataseg.min_base = expld.dot;
           expld.dataseg.base = expld.result.value;
           expld.dataseg.pagesize = commonpage;
           expld.dataseg.maxpagesize = maxpage;
@@ -591,6 +590,7 @@
       /* Operands swapped!  DATA_SEGMENT_RELRO_END(offset,exp)
          has offset in expld.result and exp in lhs.  */
       expld.dataseg.relro = exp_dataseg_relro_end;
+      expld.dataseg.relro_offset = expld.result.value;
       if (expld.phase == lang_first_phase_enum
           || expld.section != bfd_abs_section_ptr)
         expld.result.valid_p = FALSE;
diff --git a/ld/ldexp.h b/ld/ldexp.h
index 10fcf3d96c..f61df6b459 100644
--- a/ld/ldexp.h
+++ b/ld/ldexp.h
@@ -156,7 +156,7 @@ struct ldexp_control {
   struct {
     enum phase_enum phase;
 
-    bfd_vma base, min_base, relro_end, end, pagesize, maxpagesize;
+    bfd_vma base, relro_offset, relro_end, end, pagesize, maxpagesize;
 
     enum relro_enum relro;
 
diff --git a/ld/ldlang.c b/ld/ldlang.c
index b074169104..c96c21fd4e 100644
--- a/ld/ldlang.c
+++ b/ld/ldlang.c
@@ -5382,56 +5382,51 @@ lang_size_sections (bfd_boolean *relax, bfd_boolean check_regions)
   if (expld.dataseg.phase == exp_dataseg_end_seen
       && link_info.relro && expld.dataseg.relro_end)
     {
-      bfd_vma initial_base, min_base, relro_end, maxpage;
+      bfd_vma initial_base, relro_end, desired_end;
+      asection *sec;
 
-      expld.dataseg.phase = exp_dataseg_relro_adjust;
-      maxpage = expld.dataseg.maxpagesize;
-      initial_base = expld.dataseg.base;
-      /* Try to put expld.dataseg.relro_end on a (common) page boundary.  */
-      expld.dataseg.base += (-expld.dataseg.relro_end
-                             & (expld.dataseg.pagesize - 1));
       /* Compute the expected PT_GNU_RELRO segment end.  */
       relro_end = ((expld.dataseg.relro_end + expld.dataseg.pagesize - 1)
                    & ~(expld.dataseg.pagesize - 1));
-      /* MIN_BASE is the absolute minimum address we are allowed to start the
-         read-write segment (byte before will be mapped read-only).  */
-      min_base = (expld.dataseg.min_base + maxpage - 1) & ~(maxpage - 1);
-      if (min_base + maxpage < expld.dataseg.base)
-        {
-          expld.dataseg.base -= maxpage;
-          relro_end -= maxpage;
-        }
+
+      /* Adjust by the offset arg of DATA_SEGMENT_RELRO_END.  */
+      desired_end = relro_end - expld.dataseg.relro_offset;
+
+      /* For sections in the relro segment..  */
+      for (sec = link_info.output_bfd->section_last; sec; sec = sec->prev)
+        if (!IGNORE_SECTION (sec)
+            && sec->vma >= expld.dataseg.base
+            && sec->vma < expld.dataseg.relro_end - expld.dataseg.relro_offset)
+          {
+            /* Where do we want to put this section so that it ends as
+               desired?  */
+            bfd_vma start = sec->vma;
+            bfd_vma end = start + sec->size;
+            bfd_vma bump = desired_end - end;
+            /* We'd like to increase START by BUMP, but we must heed
+               alignment so the increase might be less than optimum.  */
+            start += bump & ~(((bfd_vma) 1 << sec->alignment_power) - 1);
+            /* This is now the desired end for the previous section.  */
+            desired_end = start;
+          }
+
+      expld.dataseg.phase = exp_dataseg_relro_adjust;
+      ASSERT (desired_end >= expld.dataseg.base);
+      initial_base = expld.dataseg.base;
+      expld.dataseg.base = desired_end;
       lang_reset_memory_regions ();
       one_lang_size_sections_pass (relax, check_regions);
+
       if (expld.dataseg.relro_end > relro_end)
         {
-          /* The alignment of sections between DATA_SEGMENT_ALIGN
-             and DATA_SEGMENT_RELRO_END can cause excessive padding to
-             be inserted at DATA_SEGMENT_RELRO_END.  Try to start a
-             bit lower so that the section alignments will fit in.  */
-          asection *sec;
-          unsigned int max_alignment_power = 0;
-
-          /* Find maximum alignment power of sections between
-             DATA_SEGMENT_ALIGN and DATA_SEGMENT_RELRO_END.  */
-          for (sec = link_info.output_bfd->sections; sec; sec = sec->next)
-            if (sec->vma >= expld.dataseg.base
-                && sec->vma < expld.dataseg.relro_end
-                && sec->alignment_power > max_alignment_power)
-              max_alignment_power = sec->alignment_power;
-
-          /* Aligning the adjusted base guarantees the padding
-             between sections won't change.  This is better than
-             simply subtracting 1 << max_alignment_power which is
-             what we used to do here.  */
-          expld.dataseg.base &= ~(((bfd_vma) 1 << max_alignment_power) - 1);
-          /* It doesn't make much sense to go lower than the initial
-             base.  That can only increase padding.  */
-          if (expld.dataseg.base < initial_base)
-            expld.dataseg.base = initial_base;
+          /* Assignments to dot, or to output section address in a
+             user script have increased padding over the original.
+             Revert.  */
+          expld.dataseg.base = initial_base;
           lang_reset_memory_regions ();
           one_lang_size_sections_pass (relax, check_regions);
         }
+
       link_info.relro_start = expld.dataseg.base;
       link_info.relro_end = expld.dataseg.relro_end;
     }
diff --git a/ld/testsuite/ChangeLog b/ld/testsuite/ChangeLog
index ea6a86326b..8c37da21f0 100644
--- a/ld/testsuite/ChangeLog
+++ b/ld/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2015-04-22  Alan Modra
+
+        * ld-x86-64/pr18176.d: Update.
+
 2015-04-22  H.J. Lu
 
         PR ld/18289
diff --git a/ld/testsuite/ld-x86-64/pr18176.d b/ld/testsuite/ld-x86-64/pr18176.d
index 3a0853937c..4e3ad9ff08 100644
--- a/ld/testsuite/ld-x86-64/pr18176.d
+++ b/ld/testsuite/ld-x86-64/pr18176.d
@@ -5,5 +5,5 @@
 #target: x86_64-*-linux*
 
 #...
-  GNU_RELRO      0x04bd07 0x000000000024bd07 0x000000000024bd07 0x0022f9 0x0022f9 R      0x1
+  GNU_RELRO      0x04bd17 0x000000000024bd17 0x000000000024bd17 0x0022e9 0x0022e9 R      0x1
 #pass
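
Illustration (not part of the patch): the new loop in lang_size_sections
walks the relro output sections from last to first, sliding each one up
by a multiple of its own alignment so that it ends as close as possible
to the point where the following section needs to start.  The standalone
C sketch below models only that walk, under assumed inputs: the section
names, addresses, sizes, page size and the relro_base helper are all
hypothetical, and the real linker additionally re-runs
one_lang_size_sections_pass and falls back to the original base if a
user script ends up increasing padding.

/* Minimal sketch of the backward relro-base walk, with made-up data.  */

#include <stdio.h>
#include <stdint.h>

typedef uint64_t vma;

struct sec
{
  const char *name;
  vma addr;                     /* VMA assigned by the initial sizing pass.  */
  vma size;
  unsigned int alignment_power; /* Section alignment is 1 << this.  */
};

/* Walk the relro sections from last to first.  Move each section up by
   a multiple of its alignment so that it ends at DESIRED_END, or as
   near below it as the alignment allows.  Each section's new start
   becomes the end the previous section should aim for; the final
   result is the new base of the relro segment.  */
static vma
relro_base (const struct sec *secs, int count, vma desired_end)
{
  for (int i = count - 1; i >= 0; i--)
    {
      vma start = secs[i].addr;
      vma end = start + secs[i].size;
      vma bump = desired_end - end;
      /* Only a multiple of the alignment keeps the section aligned.  */
      start += bump & -((vma) 1 << secs[i].alignment_power);
      desired_end = start;
    }
  return desired_end;
}

int
main (void)
{
  /* Hypothetical relro contents, in address order.  */
  struct sec secs[] = {
    { ".data.rel.ro", 0x20000, 0x1234, 3 },
    { ".dynamic",     0x21238, 0x01f0, 3 },
    { ".got",         0x21500, 0x0458, 8 },  /* 256-byte aligned .got.  */
  };
  vma pagesize = 0x1000;

  /* End of the last relro section after the first pass, rounded up to
     a page boundary as PT_GNU_RELRO requires.  */
  vma relro_end = secs[2].addr + secs[2].size;
  relro_end = (relro_end + pagesize - 1) & -pagesize;

  vma base = relro_base (secs, 3, relro_end);
  printf ("page-aligned relro end: %#llx\n", (unsigned long long) relro_end);
  printf ("new relro segment base: %#llx\n", (unsigned long long) base);
  return 0;
}

For these assumed inputs the sketch prints a page-aligned relro end of
0x22000 and a new base of 0x206d8.  The 256-byte alignment of .got is
what keeps its end slightly below the page boundary, which is exactly
the "as near as possible considering alignment requirements" case the
rewrite is meant to handle without inserting extra GOT padding.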