ARM: mm: allow non-text sections to be non-executable

Adds CONFIG_ARM_KERNMEM_PERMS to separate the kernel memory regions
into section-sized areas that can have different permissions. Performs
the NX permission changes during free_initmem, so that init memory can be
reclaimed.

This uses section size instead of PMD size to reduce memory lost to
padding on non-LPAE systems.
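
As a rough worst case: SECTION_SHIFT is 20 on non-LPAE systems, so each
of the three new alignment points (before .text, before __init_begin,
and before __data_loc) costs at most 1MiB of padding, about 3MiB total,
versus roughly double that if regions were padded to the 2MiB non-LPAE
PMD size instead.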

Based on work by Brad Spengler, Larry Bassel, and Laura Abbott.

Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Laura Abbott <lauraa@codeaurora.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
commit 1e6b48116a (parent 23a4e4050b)
Author: Kees Cook <keescook@chromium.org>
Date:   2014-04-03 17:28:11 -07:00

4 files changed, 133 insertions(+), 2 deletions(-)

diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S

@@ -8,6 +8,9 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#ifdef CONFIG_ARM_KERNMEM_PERMS
#include <asm/pgtable.h>
#endif
#define PROC_INFO \
. = ALIGN(4); \
@@ -90,6 +93,11 @@ SECTIONS
_text = .;
HEAD_TEXT
}
#ifdef CONFIG_ARM_KERNMEM_PERMS
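/* Start .text on its own 1MiB section so it can carry distinct permissions. */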
. = ALIGN(1<<SECTION_SHIFT);
#endif
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
@@ -145,7 +153,11 @@ SECTIONS
_etext = .; /* End of text and rodata section */
#ifndef CONFIG_XIP_KERNEL
# ifdef CONFIG_ARM_KERNMEM_PERMS
. = ALIGN(1<<SECTION_SHIFT);
# else
. = ALIGN(PAGE_SIZE);
# endif
__init_begin = .;
#endif
/*
@@ -220,7 +232,11 @@ SECTIONS
. = PAGE_OFFSET + TEXT_OFFSET;
#else
__init_end = .;
#ifdef CONFIG_ARM_KERNMEM_PERMS
. = ALIGN(1<<SECTION_SHIFT);
#else
. = ALIGN(THREAD_SIZE);
#endif
__data_loc = .;
#endif

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig

@@ -1008,3 +1008,12 @@ config ARCH_SUPPORTS_BIG_ENDIAN
help
This option specifies the architecture can support big endian
operation.
config ARM_KERNMEM_PERMS
bool "Restrict kernel memory permissions"
help
If this is set, kernel memory other than kernel text (and rodata)
will be made non-executable. The tradeoff is that each region is
padded to section-size (1MiB) boundaries (their permissions differ,
and splitting the 1MiB sections into 4KiB pages would cause TLB
performance problems), wasting some memory.
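For scale, a single 1MiB section mapping can be held in one TLB
entry, while the same range mapped as 4KiB pages may need up to 256.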

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c

@ -29,6 +29,7 @@
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
@@ -615,7 +616,99 @@ void __init mem_init(void)
}
}
void free_initmem(void)
#ifdef CONFIG_ARM_KERNMEM_PERMS
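/*
 * Describes a section-aligned range of kernel virtual addresses along
 * with the pmd bits to preserve (mask is ANDed with the existing entry)
 * and the bits to set (prot is ORed in) when the range is updated.
 */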
struct section_perm {
unsigned long start;
unsigned long end;
pmdval_t mask;
pmdval_t prot;
};
struct section_perm nx_perms[] = {
/* Make page tables, etc. before _stext RW (set NX). */
{
.start = PAGE_OFFSET,
.end = (unsigned long)_stext,
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
},
/* Make init RW (set NX). */
{
.start = (unsigned long)__init_begin,
.end = (unsigned long)_sdata,
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
},
};
/*
* Updates section permissions only for the current mm (sections are
* copied into each mm). During startup, this is the init_mm. It is only
* safe to call this with preemption disabled, as under stop_machine().
*/
static inline void section_update(unsigned long addr, pmdval_t mask,
pmdval_t prot)
{
struct mm_struct *mm;
pmd_t *pmd;
mm = current->active_mm;
pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
#ifdef CONFIG_ARM_LPAE
pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
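/*
 * Without LPAE, a Linux pmd covers 2MiB as a pair of 1MiB hardware
 * section entries, so pick the first or second hardware entry based
 * on which half of the 2MiB range the address falls in.
 */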
if (addr & SECTION_SIZE)
pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
else
pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
flush_pmd_entry(pmd);
local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
if (cpu_architecture() < CPU_ARCH_ARMv6)
return false;
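/*
 * get_cr() reads the CP15 control register; CR_XP ("extended page
 * tables") selects the ARMv6 descriptor format, which is where the
 * XN section bit is defined.
 */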
return !!(get_cr() & CR_XP);
}
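/*
 * set_section_perms() is a macro rather than a function so that "field"
 * can name a struct section_perm member at the call site, and so the
 * early "return" on pre-ARMv6 hardware returns from the caller.
 */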
#define set_section_perms(perms, field) { \
size_t i; \
unsigned long addr; \
\
if (!arch_has_strict_perms()) \
return; \
\
for (i = 0; i < ARRAY_SIZE(perms); i++) { \
if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \
!IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \
pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
perms[i].start, perms[i].end, \
SECTION_SIZE); \
continue; \
} \
\
for (addr = perms[i].start; \
addr < perms[i].end; \
addr += SECTION_SIZE) \
section_update(addr, perms[i].mask, \
perms[i].field); \
} \
}
static inline void fix_kernmem_perms(void)
{
set_section_perms(nx_perms, prot);
}
#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_ARM_KERNMEM_PERMS */
void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
extern char __tcm_start, __tcm_end;
@@ -623,6 +716,12 @@ void free_initmem(void)
poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
}
void free_initmem(void)
{
fix_kernmem_perms();
free_tcmmem();
poison_init_mem(__init_begin, __init_end - __init_begin);
if (!machine_is_integrator() && !machine_is_cintegrator())

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c

@@ -1373,12 +1373,19 @@ static void __init map_lowmem(void)
if (start >= end)
break;
if (end < kernel_x_start || start >= kernel_x_end) {
if (end < kernel_x_start) {
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
map.type = MT_MEMORY_RWX;
create_mapping(&map);
} else if (start >= kernel_x_end) {
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
map.type = MT_MEMORY_RW;
create_mapping(&map);
} else {
/* This better cover the entire kernel */