From 524dabe1c68e0bca25ce7b108099e5d89472a101 Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Mon, 16 Jan 2017 12:46:33 +0100
Subject: [PATCH 1/8] arm64: Fix swiotlb fallback allocation

Commit b67a8b29df introduced logic to skip swiotlb allocation when all
memory is DMA accessible anyway.

While this is a great idea, __dma_alloc still calls swiotlb code
unconditionally to allocate memory when there is no CMA memory
available. The swiotlb code is called to ensure that we at least try
get_free_pages().

Without initialization, swiotlb allocation code tries to access
io_tlb_list which is NULL. That results in a stack trace like this:

  Unable to handle kernel NULL pointer dereference at virtual address 00000000
  [...]
  [] swiotlb_tbl_map_single+0xd0/0x2b0
  [] swiotlb_alloc_coherent+0x10c/0x198
  [] __dma_alloc+0x68/0x1a8
  [] drm_gem_cma_create+0x98/0x108 [drm]
  [] drm_fbdev_cma_create_with_funcs+0xbc/0x368 [drm_kms_helper]
  [] drm_fbdev_cma_create+0x2c/0x40 [drm_kms_helper]
  [] drm_fb_helper_initial_config+0x238/0x410 [drm_kms_helper]
  [] drm_fbdev_cma_init_with_funcs+0x98/0x160 [drm_kms_helper]
  [] drm_fbdev_cma_init+0x40/0x58 [drm_kms_helper]
  [] vc4_kms_load+0x90/0xf0 [vc4]
  [] vc4_drm_bind+0xec/0x168 [vc4]
  [...]

Thankfully swiotlb code just learned how to not do allocations with the
FORCE_NO option. This patch configures the swiotlb code to use that if
we decide not to initialize the swiotlb framework.

Fixes: b67a8b29df ("arm64: mm: only initialize swiotlb when necessary")
Signed-off-by: Alexander Graf
CC: Jisheng Zhang
CC: Geert Uytterhoeven
CC: Konrad Rzeszutek Wilk
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/init.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 716d1226ba69..380ebe705093 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -404,6 +404,8 @@ void __init mem_init(void)
 	if (swiotlb_force == SWIOTLB_FORCE ||
 	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
 		swiotlb_init(1);
+	else
+		swiotlb_force = SWIOTLB_NO_FORCE;
 
 	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

From 1c8a946bf3754a59cba1fc373949a8114bfe5aaa Mon Sep 17 00:00:00 2001
From: Oleksandr Andrushchenko
Date: Wed, 18 Jan 2017 09:09:25 +0200
Subject: [PATCH 2/8] arm64: mm: avoid name clash in __page_to_voff()

The arm64 __page_to_voff() macro takes a parameter called 'page', and
also refers to 'struct page'. Thus, if the value passed in is not
called 'page', we'll refer to the wrong struct name (which might not
exist).

Fixes: 3fa72fe9c614 ("arm64: mm: fix __page_to_voff definition")
Acked-by: Mark Rutland
Suggested-by: Volodymyr Babchuk
Signed-off-by: Oleksandr Andrushchenko
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/memory.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index bfe632808d77..90c39a662379 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 #else
 #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(page)	(((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
 
 #define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
 #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))

From 9a17b876b573441bfb3387ad55d98bf7184daf9d Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 18 Jan 2017 16:25:20 +0000
Subject: [PATCH 3/8] arm64/ptrace: Preserve previous registers for short regset write

Ensure that if userspace supplies insufficient data to PTRACE_SETREGSET
to fill all the registers, the thread's old registers are preserved.

Cc: # 3.7.x-
Fixes: 478fcb2cdb23 ("arm64: Debugging support")
Signed-off-by: Dave Martin
Acked-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/ptrace.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fc35e06ccaac..8c0bc3434f55 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -596,7 +596,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
 		   const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	struct user_pt_regs newregs;
+	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
 	if (ret)
@@ -626,7 +626,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 		   const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	struct user_fpsimd_state newstate;
+	struct user_fpsimd_state newstate =
+		target->thread.fpsimd_state.user_fpsimd;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
 	if (ret)
@@ -650,7 +651,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
 		   const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	unsigned long tls;
+	unsigned long tls = target->thread.tp_value;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
 	if (ret)

From 9dd73f72f218320c6c90da5f834996e7360dc227 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 18 Jan 2017 16:25:21 +0000
Subject: [PATCH 4/8] arm64/ptrace: Preserve previous registers for short regset write

Ensure that if userspace supplies insufficient data to PTRACE_SETREGSET
to fill all the registers, the thread's old registers are preserved.

Cc: # 3.19.x-
Fixes: 766a85d7bc5d ("arm64: ptrace: add NT_ARM_SYSTEM_CALL regset")
Signed-off-by: Dave Martin
Acked-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/ptrace.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 8c0bc3434f55..ead36d5fad6d 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -677,7 +677,8 @@ static int system_call_set(struct task_struct *target,
 			   unsigned int pos, unsigned int count,
 			   const void *kbuf, const void __user *ubuf)
 {
-	int syscallno, ret;
+	int syscallno = task_pt_regs(target)->syscallno;
+	int ret;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
 	if (ret)

From a672401c00f82e4e19704aff361d9bad18003714 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 18 Jan 2017 16:25:22 +0000
Subject: [PATCH 5/8] arm64/ptrace: Preserve previous registers for short regset write

Ensure that if userspace supplies insufficient data to PTRACE_SETREGSET
to fill all the registers, the thread's old registers are preserved.

Cc: # 4.3.x-
Fixes: 5d220ff9420f ("arm64: Better native ptrace support for compat tasks")
Signed-off-by: Dave Martin
Acked-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/ptrace.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index ead36d5fad6d..91c4719dfc00 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -950,7 +950,7 @@ static int compat_tls_set(struct task_struct *target,
 			  const void __user *ubuf)
 {
 	int ret;
-	compat_ulong_t tls;
+	compat_ulong_t tls = target->thread.tp_value;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
 	if (ret)

From aeb1f39d814b2e21e5e5706a48834bfd553d0059 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 18 Jan 2017 16:25:23 +0000
Subject: [PATCH 6/8] arm64/ptrace: Avoid uninitialised struct padding in fpr_set()

This patch adds an explicit __reserved[] field to user_fpsimd_state to
replace what was previously unnamed padding.

This ensures that data in this region are propagated across assignment
rather than being left possibly uninitialised at the destination.

Cc: # 3.7.x-
Fixes: 60ffc30d5652 ("arm64: Exception handling")
Signed-off-by: Dave Martin
Acked-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/uapi/asm/ptrace.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index b5c3933ed441..d1ff83dfe5de 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
 	__uint128_t	vregs[32];
 	__u32		fpsr;
 	__u32		fpcr;
+	__u32		__reserved[2];
 };
 
 struct user_hwdebug_state {

From ad9e202aa1ce571b1d7fed969d06f66067f8a086 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 18 Jan 2017 16:25:24 +0000
Subject: [PATCH 7/8] arm64/ptrace: Reject attempts to set incomplete hardware breakpoint fields

We cannot preserve partial fields for hardware breakpoints, because the
values written by userspace to the hardware breakpoint registers can't
subsequently be recovered intact from the hardware. So, just reject
attempts to write incomplete fields with -EINVAL.

Cc: # 3.7.x-
Fixes: 478fcb2cdb23 ("arm64: Debugging support")
Signed-off-by: Dave Martin
Acked-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/ptrace.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 91c4719dfc00..a22161ccf447 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target,
 	/* (address, ctrl) registers */
 	limit = regset->n * regset->size;
 	while (count && offset < limit) {
+		if (count < PTRACE_HBP_ADDR_SZ)
+			return -EINVAL;
 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
 					 offset, offset + PTRACE_HBP_ADDR_SZ);
 		if (ret)
@@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target,
 			return ret;
 		offset += PTRACE_HBP_ADDR_SZ;
 
+		if (!count)
+			break;
 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
 					 offset, offset + PTRACE_HBP_CTRL_SZ);
 		if (ret)

From 7d9e8f71b989230bc613d121ca38507d34ada849 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Wed, 18 Jan 2017 17:23:41 +0000
Subject: [PATCH 8/8] arm64: avoid returning from bad_mode

Generally, taking an unexpected exception should be a fatal event, and
bad_mode is intended to cater for this. However, it should be possible
to contain unexpected synchronous exceptions from EL0 without bringing
the kernel down, by sending a SIGILL to the task.

We tried to apply this approach in commit 9955ac47f4ba1c95 ("arm64:
don't kill the kernel on a bad esr from el0"), by sending a signal for
any bad_mode call resulting from an EL0 exception.

However, this also applies to other unexpected exceptions, such as
SError and FIQ. The entry paths for these exceptions branch to bad_mode
without configuring the link register, and have no kernel_exit. Thus,
if we take one of these exceptions from EL0, bad_mode will eventually
return to the original user link register value.

This patch fixes this by introducing a new bad_el0_sync handler to
cater for the recoverable case, and restoring bad_mode to its original
state, whereby it calls panic() and never returns. The recoverable case
branches to bad_el0_sync with a bl, and returns to userspace via the
usual ret_to_user mechanism.

Signed-off-by: Mark Rutland
Fixes: 9955ac47f4ba1c95 ("arm64: don't kill the kernel on a bad esr from el0")
Reported-by: Mark Salter
Cc: Will Deacon
Cc: stable@vger.kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/entry.S |  2 +-
 arch/arm64/kernel/traps.c | 28 ++++++++++++++++++++++++----
 2 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 923841ffe4a9..43512d4d7df2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -683,7 +683,7 @@ el0_inv:
 	mov	x0, sp
 	mov	x1, #BAD_SYNC
 	mov	x2, x25
-	bl	bad_mode
+	bl	bad_el0_sync
 	b	ret_to_user
 ENDPROC(el0_sync)
 
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5b830be79c01..659b2e6b6cf7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr)
 }
 
 /*
- * bad_mode handles the impossible case in the exception vector.
+ * bad_mode handles the impossible case in the exception vector. This is always
+ * fatal.
  */
 asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
-	siginfo_t info;
-	void __user *pc = (void __user *)instruction_pointer(regs);
 	console_verbose();
 
 	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
 		handler[reason], smp_processor_id(), esr,
 		esr_get_class_string(esr));
+
+	die("Oops - bad mode", regs, 0);
+	local_irq_disable();
+	panic("bad mode");
+}
+
+/*
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
+ * exceptions taken from EL0. Unlike bad_mode, this returns.
+ */
+asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+{
+	siginfo_t info;
+	void __user *pc = (void __user *)instruction_pointer(regs);
+	console_verbose();
+
+	pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
+		smp_processor_id(), esr, esr_get_class_string(esr));
 	__show_regs(regs);
 
 	info.si_signo = SIGILL;
@@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 	info.si_errno = 0;
 	info.si_code = ILL_ILLOPC;
 	info.si_addr = pc;
-	arm64_notify_die("Oops - bad mode", regs, &info, 0);
+	current->thread.fault_address = 0;
+	current->thread.fault_code = 0;
+
+	force_sig_info(info.si_signo, &info, current);
 }
 
 void __pte_error(const char *file, int line, unsigned long val)
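
The ptrace fixes in patches 3-7 are easiest to see from the tracer's side.
The stand-alone sketch below is not part of the series; it assumes an arm64
Linux host with glibc, and keeps error handling minimal. It performs the kind
of short PTRACE_SETREGSET write those patches address: only x0 is supplied,
and with the fixes applied the kernel keeps the thread's remaining registers
(sp, pc, pstate, x1-x30) intact instead of filling them from uninitialised
kernel stack memory.

/* Illustrative only -- not part of the patch series above. */
#include <elf.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* Child: ask to be traced, then stop until the parent pokes us. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	waitpid(child, NULL, 0);

	struct user_regs_struct regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	/* Read the full NT_PRSTATUS regset first. */
	ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov);

	/*
	 * Short regset write: supply only x0. The kernel must preserve the
	 * registers it was not given, rather than use uninitialised data.
	 */
	regs.regs[0] = 0x1234;
	iov.iov_len = sizeof(regs.regs[0]);
	if (ptrace(PTRACE_SETREGSET, child, NT_PRSTATUS, &iov) != 0)
		perror("PTRACE_SETREGSET");

	ptrace(PTRACE_DETACH, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}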