Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - add support for ftrace-with-registers, which is needed for kgraft and
   other ftrace tools

 - support for mremap() for the sigpage/vDSO so that checkpoint/restore
   can work

 - add timestamps to each line of the register dump output

 - remove the unused KTHREAD_SIZE from nommu

 - align the ARM bitops APIs with the generic API (using unsigned long
   pointers rather than void pointers)

 - make the configuration of userspace Thumb support an expert option so
   that we can default it on, and avoid some hard to debug userspace
   crashes

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8684/1: NOMMU: Remove unused KTHREAD_SIZE definition
  ARM: 8683/1: ARM32: Support mremap() for sigpage/vDSO
  ARM: 8679/1: bitops: Align prototypes to generic API
  ARM: 8678/1: ftrace: Adds support for CONFIG_DYNAMIC_FTRACE_WITH_REGS
  ARM: make configuration of userspace Thumb support an expert option
  ARM: 8673/1: Fix __show_regs output timestamps
Linus Torvalds 2017-07-08 12:17:25 -07:00
commit 09b56d5a41
11 changed files with 185 additions and 18 deletions

arch/arm/Kconfig

@@ -58,6 +58,7 @@ config ARM
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_EXIT_THREAD
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)

arch/arm/include/asm/bitops.h

@@ -159,16 +159,16 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/
-extern int _find_first_zero_bit_le(const void * p, unsigned size);
-extern int _find_next_zero_bit_le(const void * p, int size, int offset);
+extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
extern int _find_first_bit_le(const unsigned long *p, unsigned size);
extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/
-extern int _find_first_zero_bit_be(const void * p, unsigned size);
-extern int _find_next_zero_bit_be(const void * p, int size, int offset);
+extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
+extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size);
extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
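The unsigned long * form is what the generic bitmap helpers expect, so a DECLARE_BITMAP() array can be passed straight through without casts. A minimal sketch of a caller (illustrative only; slot_map and claim_free_slot are made-up names, not part of this series):

    #include <linux/bitmap.h>
    #include <linux/bitops.h>
    #include <linux/errno.h>

    static DECLARE_BITMAP(slot_map, 64);    /* backing store is unsigned long[] */

    static int claim_free_slot(void)
    {
            /* find_first_zero_bit() takes an unsigned long *, which now
             * matches the ARM _find_first_zero_bit_le/_be prototypes. */
            unsigned long slot = find_first_zero_bit(slot_map, 64);

            if (slot >= 64)
                    return -ENOSPC;
            set_bit(slot, slot_map);
            return slot;
    }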

arch/arm/include/asm/ftrace.h

@@ -1,6 +1,10 @@
#ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
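Defining ARCH_SUPPORTS_FTRACE_OPS together with DYNAMIC_FTRACE_WITH_REGS is what lets an ftrace callback receive the ftrace_ops pointer and a pt_regs snapshot. A minimal sketch of a consumer, assuming the callback signature of this kernel era; my_trace_func, my_ops, my_attach and the traced symbol vfs_read are illustrative choices, not part of this series:

    #include <linux/ftrace.h>
    #include <linux/kallsyms.h>
    #include <linux/ptrace.h>
    #include <linux/printk.h>
    #include <linux/errno.h>

    /* Signature used when FTRACE_OPS_FL_SAVE_REGS is set; regs points at the
     * pt_regs frame that ftrace_regs_caller builds on the stack. */
    static void my_trace_func(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs)
    {
            if (regs)
                    pr_info("%pS called from %pS (pc=%lx)\n",
                            (void *)ip, (void *)parent_ip,
                            instruction_pointer(regs));
    }

    static struct ftrace_ops my_ops = {
            .func  = my_trace_func,
            .flags = FTRACE_OPS_FL_SAVE_REGS,   /* request the regs trampoline */
    };

    static int my_attach(void)
    {
            unsigned long ip = kallsyms_lookup_name("vfs_read");
            int ret;

            if (!ip)
                    return -ENOENT;
            ret = ftrace_set_filter_ip(&my_ops, ip, 0, 0);
            return ret ? ret : register_ftrace_function(&my_ops);
    }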

arch/arm/include/asm/page-nommu.h

@@ -11,12 +11,6 @@
#ifndef _ASMARM_PAGE_NOMMU_H
#define _ASMARM_PAGE_NOMMU_H
-#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
-#define KTHREAD_SIZE (8192)
-#else
-#define KTHREAD_SIZE PAGE_SIZE
-#endif
#define clear_page(page) memset((page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)

arch/arm/kernel/entry-ftrace.S

@@ -92,12 +92,95 @@
2: mcount_exit
.endm
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
.macro __ftrace_regs_caller
sub sp, sp, #8 @ space for PC and CPSR OLD_R0,
@ OLD_R0 will overwrite previous LR
add ip, sp, #12 @ move in IP the value of SP as it was
@ before the push {lr} of the mcount mechanism
str lr, [sp, #0] @ store LR instead of PC
ldr lr, [sp, #8] @ get previous LR
str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
stmdb sp!, {ip, lr}
stmdb sp!, {r0-r11, lr}
@ stack content at this point:
@ 0 4 48 52 56 60 64 68 72
@ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
mov r3, sp @ struct pt_regs*
ldr r2, =function_trace_op
ldr r2, [r2] @ pointer to the current
@ function tracing op
ldr r1, [sp, #S_LR] @ lr of instrumented func
ldr lr, [sp, #S_PC] @ get LR
mcount_adjust_addr r0, lr @ instrumented function
.globl ftrace_regs_call
ftrace_regs_call:
bl ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_regs_call
ftrace_graph_regs_call:
mov r0, r0
#endif
@ pop saved regs
ldmia sp!, {r0-r12} @ restore r0 through r12
ldr ip, [sp, #8] @ restore PC
ldr lr, [sp, #4] @ restore LR
ldr sp, [sp, #0] @ restore SP
mov pc, ip @ return
.endm
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.macro __ftrace_graph_regs_caller
sub r0, fp, #4 @ lr of instrumented routine (parent)
@ called from __ftrace_regs_caller
ldr r1, [sp, #S_PC] @ instrumented routine (func)
mcount_adjust_addr r1, r1
mov r2, fp @ frame pointer
bl prepare_ftrace_return
@ pop registers saved in ftrace_regs_caller
ldmia sp!, {r0-r12} @ restore r0 through r12
ldr ip, [sp, #8] @ restore PC
ldr lr, [sp, #4] @ restore LR
ldr sp, [sp, #0] @ restore SP
mov pc, ip @ return
.endm
#endif
#endif
.macro __ftrace_caller suffix
mcount_enter
mcount_get_lr r1 @ lr of instrumented func
mcount_adjust_addr r0, lr @ instrumented function
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ldr r2, =function_trace_op
ldr r2, [r2] @ pointer to the current
@ function tracing op
mov r3, #0 @ regs is NULL
#endif
.globl ftrace_call\suffix
ftrace_call\suffix:
bl ftrace_stub
@@ -212,6 +295,15 @@ UNWIND(.fnstart)
__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_regs_caller)
UNWIND(.fnstart)
__ftrace_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_regs_caller)
#endif
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -220,6 +312,14 @@ UNWIND(.fnstart)
__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_graph_regs_caller)
UNWIND(.fnstart)
__ftrace_graph_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_regs_caller)
#endif
#endif
.purgem mcount_enter
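The pt_regs frame this trampoline builds is what makes live patching workable on ARM: a callback registered with FTRACE_OPS_FL_SAVE_REGS (and, by convention, FTRACE_OPS_FL_IPMODIFY) may rewrite the saved PC, so that when ftrace_regs_caller restores the registers, execution resumes in a replacement function instead of the patched one. A hypothetical sketch of that redirection, not kGraft's actual code; new_implementation and redirect_callback are made-up names:

    #include <linux/ftrace.h>
    #include <linux/ptrace.h>

    static int new_implementation(int arg);   /* replacement body, defined elsewhere */

    static void redirect_callback(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op, struct pt_regs *regs)
    {
            /* Overwrite the PC slot saved by ftrace_regs_caller; the
             * "mov pc, ip" on the return path then enters the replacement,
             * with lr still pointing back at the original caller. */
            regs->ARM_pc = (unsigned long)new_implementation;
    }

    static struct ftrace_ops patch_ops = {
            .func  = redirect_callback,
            .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
    };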

arch/arm/kernel/ftrace.c

@@ -141,6 +141,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
ret = ftrace_modify_code(pc, 0, new, false);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
if (!ret) {
pc = (unsigned long)&ftrace_regs_call;
new = ftrace_call_replace(pc, (unsigned long)func);
ret = ftrace_modify_code(pc, 0, new, false);
}
#endif
#ifdef CONFIG_OLD_MCOUNT
if (!ret) {
pc = (unsigned long)&ftrace_call_old;
@@ -159,11 +168,29 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
unsigned long ip = rec->ip;
old = ftrace_nop_replace(rec);
new = ftrace_call_replace(ip, adjust_address(rec, addr));
return ftrace_modify_code(rec->ip, old, new, true);
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
unsigned long new, old;
unsigned long ip = rec->ip;
old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
new = ftrace_call_replace(ip, adjust_address(rec, addr));
return ftrace_modify_code(rec->ip, old, new, true);
}
#endif
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
@@ -231,6 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);
static int __ftrace_modify_caller(unsigned long *callsite,
void (*func) (void), bool enable)
@@ -253,6 +282,14 @@ static int ftrace_modify_graph_caller(bool enable)
ftrace_graph_caller,
enable);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
if (!ret)
ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
ftrace_graph_regs_caller,
enable);
#endif
#ifdef CONFIG_OLD_MCOUNT
if (!ret)
ret = __ftrace_modify_caller(&ftrace_graph_call_old,
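For context on when the new ftrace_modify_call() hook above runs: the ftrace core invokes it when a call site has to switch trampolines, for example when an ftrace_ops with FTRACE_OPS_FL_SAVE_REGS attaches to a function that was so far routed through the plain ftrace_caller. A simplified sketch of that selection (the real ftrace_get_addr_new() in kernel/trace/ftrace.c also handles per-ops trampolines; pick_trampoline is a made-up name):

    #include <linux/ftrace.h>

    extern void ftrace_caller(void);
    extern void ftrace_regs_caller(void);

    /* Simplified: which trampoline should this call site branch to? */
    static unsigned long pick_trampoline(struct dyn_ftrace *rec)
    {
            if (rec->flags & FTRACE_FL_REGS)
                    return (unsigned long)ftrace_regs_caller;
            return (unsigned long)ftrace_caller;
    }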

arch/arm/kernel/process.c

@@ -123,10 +123,10 @@ void __show_regs(struct pt_regs *regs)
print_symbol("PC is at %s\n", instruction_pointer(regs));
print_symbol("LR is at %s\n", regs->ARM_lr);
printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
"sp : %08lx ip : %08lx fp : %08lx\n",
regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n",
regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr);
printk("sp : %08lx ip : %08lx fp : %08lx\n",
regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
regs->ARM_r10, regs->ARM_r9,
regs->ARM_r8);
@@ -404,9 +404,17 @@ static unsigned long sigpage_addr(const struct mm_struct *mm,
static struct page *signal_page;
extern struct page *get_signal_page(void);
static int sigpage_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
current->mm->context.sigpage = new_vma->vm_start;
return 0;
}
static const struct vm_special_mapping sigpage_mapping = {
.name = "[sigpage]",
.pages = &signal_page,
.mremap = sigpage_mremap,
};
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
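The hook exists so that a checkpoint/restore tool (CRIU-style) can relocate the sigpage with a plain mremap(2) and the kernel will keep mm->context.sigpage pointing at the new location; without it, signal return would still target the stale address. A userspace-side sketch (illustrative; finding the "[sigpage]" range, typically by parsing /proc/self/maps, is assumed and omitted, and move_sigpage is a made-up name):

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <stddef.h>
    #include <stdio.h>

    /* old_addr/len describe the "[sigpage]" VMA found in /proc/self/maps;
     * new_addr is where the restorer wants it. */
    static int move_sigpage(void *old_addr, size_t len, void *new_addr)
    {
            void *p = mremap(old_addr, len, len,
                             MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);

            if (p == MAP_FAILED) {
                    perror("mremap");
                    return -1;
            }
            /* The kernel's sigpage_mremap() hook has updated
             * mm->context.sigpage to match. */
            return 0;
    }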

arch/arm/kernel/vdso.c

@@ -54,8 +54,26 @@ static const struct vm_special_mapping vdso_data_mapping = {
.pages = &vdso_data_page,
};
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
unsigned long vdso_size;
/* without VVAR page */
vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;
if (vdso_size != new_size)
return -EINVAL;
current->mm->context.vdso = new_vma->vm_start;
return 0;
}
static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
.name = "[vdso]",
.mremap = vdso_mremap,
};
struct elfinfo {

arch/arm/mm/Kconfig

@@ -679,7 +679,7 @@ config ARCH_DMA_ADDR_T_64BIT
bool
config ARM_THUMB
bool "Support Thumb user binaries" if !CPU_THUMBONLY
bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT
depends on CPU_THUMB_CAPABLE
default y
help
@@ -690,6 +690,10 @@ config ARM_THUMB
instruction set resulting in smaller binaries at the expense of
slightly less efficient code.
If this option is disabled, and you run userspace that switches to
Thumb mode, signal handling will not work correctly, resulting in
segmentation faults or illegal instruction aborts.
If you don't know what this all is, saying Y is a safe choice.
config ARM_THUMBEE

arch/x86/entry/vdso/vma.c

@@ -78,9 +78,6 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
if (image->size != new_size)
return -EINVAL;
-if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
-return -EFAULT;
vdso_fix_landing(image, new_vma);
current->mm->context.vdso = (void __user *)new_vma->vm_start;

mm/mmap.c

@@ -3186,8 +3186,12 @@ static int special_mapping_mremap(struct vm_area_struct *new_vma)
{
struct vm_special_mapping *sm = new_vma->vm_private_data;
+if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
+return -EFAULT;
if (sm->mremap)
return sm->mremap(sm, new_vma);
return 0;
}
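For reference, the .mremap hook dispatched here is supplied when the special mapping is created; a minimal, hypothetical registration that mirrors the ARM sigpage pattern above (my_page, my_mapping, install_my_page and the mm context field are made-up names):

    #include <linux/mm.h>
    #include <linux/mm_types.h>
    #include <linux/err.h>

    static struct page *my_page;    /* single backing page, allocated elsewhere */

    static int my_mremap(const struct vm_special_mapping *sm,
                         struct vm_area_struct *new_vma)
    {
            /* e.g. cache new_vma->vm_start in an arch-specific mm context field */
            return 0;
    }

    static const struct vm_special_mapping my_mapping = {
            .name   = "[mypage]",
            .pages  = &my_page,
            .mremap = my_mremap,
    };

    static int install_my_page(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;

            vma = _install_special_mapping(mm, addr, PAGE_SIZE,
                                           VM_READ | VM_MAYREAD, &my_mapping);
            return PTR_ERR_OR_ZERO(vma);
    }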