From 1d0bbf428924f94867542d49d436cf254b9dbd06 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 6 Aug 2013 09:49:14 +0100
Subject: [PATCH 1/8] ARM: Fix the world famous typo with is_gate_vma()

Signed-off-by: Russell King
---
 arch/arm/kernel/process.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d03b5bd889c5..e28d43f74dbf 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -459,7 +459,7 @@ int in_gate_area_no_mm(unsigned long addr)
 {
 	return in_gate_area(NULL, addr);
 }
-#define is_gate_vma(vma)	((vma) = &gate_vma)
+#define is_gate_vma(vma)	((vma) == &gate_vma)
 #else
 #define is_gate_vma(vma)	0
 #endif

From 1b16c4bcf80e319b2226a886b72b8466179c8e3a Mon Sep 17 00:00:00 2001
From: Russell King
Date: Tue, 6 Aug 2013 09:48:42 +0100
Subject: [PATCH 2/8] ARM: Fix !kuser helpers case

Fix yet another build failure caused by a weird set of configuration
settings:

  LD      init/built-in.o
arch/arm/kernel/built-in.o: In function `__dabt_usr':
/home/tom3q/kernel/arch/arm/kernel/entry-armv.S:377: undefined reference to `kuser_cmpxchg64_fixup'
arch/arm/kernel/built-in.o: In function `__irq_usr':
/home/tom3q/kernel/arch/arm/kernel/entry-armv.S:387: undefined reference to `kuser_cmpxchg64_fixup'

caused by:

  CONFIG_KUSER_HELPERS=n
  CONFIG_CPU_32v6K=n
  CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG=n

Reported-by: Tomasz Figa
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d40d0ef389db..9cbe70c8b0ef 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
 	.endm

 	.macro	kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
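[Patch 1 above is a classic assignment-for-comparison bug: with a single
'=', the macro rewrites its argument and the condition is always true.
A standalone demonstration -- the struct and variable names here are
placeholders for illustration, not the kernel's own code:]

#include <stdio.h>

struct vm_area_struct { int dummy; };

static struct vm_area_struct gate_vma;
static struct vm_area_struct other_vma;

#define is_gate_vma_buggy(vma)	((vma) = &gate_vma)	/* assignment! */
#define is_gate_vma_fixed(vma)	((vma) == &gate_vma)	/* comparison */

int main(void)
{
	struct vm_area_struct *vma = &other_vma;

	if (is_gate_vma_buggy(vma))	/* always taken, and clobbers vma */
		printf("buggy macro: every vma 'is' the gate vma\n");

	vma = &other_vma;
	printf("fixed macro: %d\n", is_gate_vma_fixed(vma));	/* prints 0 */
	return 0;
}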
From 2ba85e7af4c639d933c9a87a6d7363f2983d5ada Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 8 Aug 2013 11:51:21 +0100
Subject: [PATCH 3/8] ARM: Fix FIQ code on VIVT CPUs

Aaro Koskinen reports the following oops:

Installing fiq handler from c001b110, length 0x164
Unable to handle kernel paging request at virtual address ffff1224
pgd = c0004000
[ffff1224] *pgd=00000000, *pte=11fff0cb, *ppte=11fff00a
...
[] (set_fiq_handler+0x0/0x6c) from [] (ams_delta_init_fiq+0xa8/0x160)
 r6:00000164 r5:c001b110 r4:00000000 r3:fefecb4c
[] (ams_delta_init_fiq+0x0/0x160) from [] (ams_delta_init+0xd4/0x114)
 r6:00000000 r5:fffece10 r4:c037a9e0
[] (ams_delta_init+0x0/0x114) from [] (customize_machine+0x24/0x30)

This is because the vectors page is now write-protected, and to change
code in there we must write to its original alias.  Make that change,
and adjust the cache flushing such that the code will become visible
to the instruction stream on VIVT CPUs.

Reported-by: Aaro Koskinen
Tested-by: Aaro Koskinen
Signed-off-by: Russell King
---
 arch/arm/kernel/fiq.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 25442f451148..fc7920288a3d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -84,17 +84,13 @@ int show_fiq_list(struct seq_file *p, int prec)

 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-	void *base = (void *)0xffff0000;
-#else
 	void *base = vectors_page;
-#endif
 	unsigned offset = FIQ_OFFSET;

 	memcpy(base + offset, start, length);
+	if (!cache_is_vipt_nonaliasing())
+		flush_icache_range((unsigned long)base + offset, offset + length);
 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
-	if (!vectors_high())
-		flush_icache_range(offset, offset + length);
 }

 int claim_fiq(struct fiq_handler *f)

From c95eb3184ea1a3a2551df57190c81da695e2144b Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 7 Aug 2013 23:39:41 +0100
Subject: [PATCH 4/8] ARM: 7809/1: perf: fix event validation for software
 group leaders

It is possible to construct an event group with a software event as a
group leader and then subsequently add a hardware event to the group.
This results in the event group being validated by adding all members
of the group to a fake PMU and attempting to allocate each event on
their respective PMU.

Unfortunately, for software events without a corresponding arm_pmu, this
results in a kernel crash attempting to dereference the ->get_event_idx
function pointer.

This patch fixes the problem by checking explicitly for software events
and ignoring those in event validation (since they can always be
scheduled).  We will probably want to revisit this for 3.12, since the
validation checks don't appear to work correctly when dealing with
multiple hardware PMUs anyway.

Cc:
Reported-by: Vince Weaver
Tested-by: Vince Weaver
Tested-by: Mark Rutland
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/kernel/perf_event.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533f..0500f10b5041 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -253,6 +253,9 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;

+	if (is_software_event(event))
+		return 1;
+
 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
 		return 1;
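[The crashing scenario patch 4 guards against can be reproduced from
userspace with perf_event_open(2): a software event leading a group
that later gains a hardware sibling.  A minimal Linux-only sketch, with
error handling deliberately omitted:]

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, sibling;

	/* Software event as the group leader. */
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
	leader = perf_event_open(&attr, 0, -1, -1, 0);

	/* Hardware event added to the software-led group: this is the
	 * combination that crashed the unpatched ARM validation code. */
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	sibling = perf_event_open(&attr, 0, -1, leader, 0);

	printf("leader fd=%d, sibling fd=%d\n", leader, sibling);
	return 0;
}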
From d9f966357b14e356dbd83b8f4a197a287ab4ff83 Mon Sep 17 00:00:00 2001
From: Stephen Boyd
Date: Thu, 8 Aug 2013 18:41:59 +0100
Subject: [PATCH 5/8] ARM: 7810/1: perf: Fix array out of bounds access in
 armpmu_map_hw_event()

Vince Weaver reports an oops in the ARM perf event code while running
his perf_fuzzer tool on a pandaboard running v3.11-rc4.

Unable to handle kernel paging request at virtual address 73fd14cc
pgd = eca6c000
[73fd14cc] *pgd=00000000
Internal error: Oops: 5 [#1] SMP ARM
Modules linked in: snd_soc_omap_hdmi omapdss snd_soc_omap_abe_twl6040
 snd_soc_twl6040 snd_soc_omap snd_soc_omap_hdmi_card snd_soc_omap_mcpdm
 snd_soc_omap_mcbsp snd_soc_core snd_compress regmap_spi snd_pcm
 snd_page_alloc snd_timer snd soundcore
CPU: 1 PID: 2790 Comm: perf_fuzzer Not tainted 3.11.0-rc4 #6
task: eddcab80 ti: ed892000 task.ti: ed892000
PC is at armpmu_map_event+0x20/0x88
LR is at armpmu_event_init+0x38/0x280
pc : []    lr : []    psr: 60000013
sp : ed893e40  ip : ecececec  fp : edfaec00
r10: 00000000  r9 : 00000000  r8 : ed8c3ac0
r7 : ed8c3b5c  r6 : edfaec00  r5 : 00000000  r4 : 00000000
r3 : 000000ff  r2 : c0496144  r1 : c049611c  r0 : edfaec00
Flags: nZCv  IRQs on  FIQs on  Mode SVC_32  ISA ARM  Segment user
Control: 10c5387d  Table: aca6c04a  DAC: 00000015
Process perf_fuzzer (pid: 2790, stack limit = 0xed892240)
Stack: (0xed893e40 to 0xed894000)
3e40: 00000800 c001c17c 00000002 c008a748 00000001 00000000 00000000 c00bf078
3e60: 00000000 edfaee50 00000000 00000000 00000000 edfaec00 ed8c3ac0 edfaec00
3e80: 00000000 c073ffac ed893f20 c00bf180 00000001 00000000 c00bf078 ed893f20
3ea0: 00000000 ed8c3ac0 00000000 00000000 00000000 c0cb0818 eddcab80 c00bf440
3ec0: ed893f20 00000000 eddcab80 eca76800 00000000 eca76800 00000000 00000000
3ee0: 00000000 ec984c80 eddcab80 c00bfe68 00000000 00000000 00000000 00000080
3f00: 00000000 ed892000 00000000 ed892030 00000004 ecc7e3c8 ecc7e3c8 00000000
3f20: 00000000 00000048 ecececec 00000000 00000000 00000000 00000000 00000000
3f40: 00000000 00000000 00297810 00000000 00000000 00000000 00000000 00000000
3f60: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
3f80: 00000002 00000002 000103a4 00000002 0000016c c00128e8 ed892000 00000000
3fa0: 00090998 c0012700 00000002 000103a4 00090ab8 00000000 00000000 0000000f
3fc0: 00000002 000103a4 00000002 0000016c 00090ab0 00090ab8 000107a0 00090998
3fe0: bed92be0 bed92bd0 0000b785 b6e8f6d0 40000010 00090ab8 00000000 00000000
[] (armpmu_map_event+0x20/0x88) from [] (armpmu_event_init+0x38/0x280)
[] (armpmu_event_init+0x38/0x280) from [] (perf_init_event+0x108/0x180)
[] (perf_init_event+0x108/0x180) from [] (perf_event_alloc+0x248/0x40c)
[] (perf_event_alloc+0x248/0x40c) from [] (SyS_perf_event_open+0x4f4/0x8fc)
[] (SyS_perf_event_open+0x4f4/0x8fc) from [] (ret_fast_syscall+0x0/0x48)
Code: 0a000005 e3540004 0a000016 e3540000 (0791010c)

This is because event->attr.config in armpmu_event_init() contains a
very large number copied directly from userspace and is never checked
against the size of the array indexed in armpmu_map_hw_event().  Fix
the problem by checking the value of config before indexing the array
and rejecting invalid config values.
Reported-by: Vince Weaver
Tested-by: Vince Weaver
Acked-by: Will Deacon
Signed-off-by: Stephen Boyd
Signed-off-by: Russell King
---
 arch/arm/kernel/perf_event.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 0500f10b5041..e186ee1e63f6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*event_map)[config];
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -EINVAL;
+
+	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }

From afa31d8eb86fc2f25083e675d57ac8173a98f999 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 12 Aug 2013 18:03:26 +0100
Subject: [PATCH 6/8] ARM: 7811/1: locks: use early clobber in arch_spin_trylock

The res variable is written before we've finished with the input
operands (namely the lock address), so ensure that we mark it as
`early clobber' to avoid unintended register sharing.

Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/spinlock.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..7ed43f68e044 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 "	subs	%1, %0, %0, ror #16\n"
 "	addeq	%0, %0, %4\n"
 "	strexeq	%2, %0, [%3]"
-	: "=&r" (slock), "=&r" (contended), "=r" (res)
+	: "=&r" (slock), "=&r" (contended), "=&r" (res)
 	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 	} while (res);
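[Patch 6 is a one-character fix that is easy to misread.  GCC assumes a
plain "=r" output is written only after all inputs have been consumed,
so it may give the output the same register as an input that is in fact
still live.  A standalone toy sketch of the hazard -- hypothetical
function, compiles with an ARM32 toolchain targeting ARMv6 or later,
not the kernel's code:]

static inline void toy_atomic_inc(unsigned long *addr)
{
	unsigned long tmp, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	add	%0, %0, #1\n"
		"	strex	%1, %0, [%2]"
		/* strex writes "res" (%1) on every iteration while "addr"
		 * (%2) is still needed for the retry, so the two must not
		 * share a register: "=&r" (early clobber) says exactly
		 * that.  A plain "=r" (res) here would be unsafe. */
		: "=&r" (tmp), "=&r" (res)
		: "r" (addr)
		: "memory");	/* *addr is modified behind the compiler */
	} while (res);
}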
From 00efaa0250939dc148e2d3104fb3c18395d24a2d Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 12 Aug 2013 18:04:05 +0100
Subject: [PATCH 7/8] ARM: 7812/1: rwlocks: retry trylock operation if strex
 fails on free lock

Commit 15e7e5c1ebf5 ("ARM: 7749/1: spinlock: retry trylock operation if
strex fails on free lock") modified our arch_spin_trylock to retry the
acquisition if the lock appeared uncontended, but the strex failed.

This patch does the same for rwlocks, which were missed by the original
patch.

Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/include/asm/spinlock.h | 49 ++++++++++++++++++++-------------
 1 file changed, 30 insertions(+), 19 deletions(-)

diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 7ed43f68e044..b07c09e5a0ac 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)

 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long contended, res;

-	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%1, %3, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc");
+	} while (res);

-	if (tmp == 0) {
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)

 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2 = 1;
+	unsigned long contended, res;

-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	adds	%0, %0, #1\n"
+		"	strexpl	%1, %0, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock)
+		: "cc");
+	} while (res);

-	smp_mb();
-	return tmp2 == 0;
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }

 /* read_can_lock - would read_trylock() succeed? */
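[The retry loop in patch 7 matters because strex may fail spuriously
even when the lock was free, and that failure must not be reported to
the caller as contention.  The same distinction exists in portable
atomics, where a *weak* compare-exchange is likewise allowed to fail
spuriously.  A toy sketch using GCC's __atomic builtins -- not the
kernel's rwlock implementation:]

#include <stdbool.h>

static inline bool toy_write_trylock(unsigned long *lock)
{
	unsigned long contended;

	do {
		contended = __atomic_load_n(lock, __ATOMIC_RELAXED);
		if (contended != 0)
			return false;	/* genuinely held: give up */
		/* Weak CAS: may fail spuriously, like strex, so loop
		 * rather than treating the failure as contention. */
	} while (!__atomic_compare_exchange_n(lock, &contended, 0x80000000UL,
					      true /* weak */,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));

	return true;
}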
Biederman" Signed-off-by: Russell King --- arch/arm/include/asm/smp_plat.h | 3 +++ arch/arm/kernel/machine_kexec.c | 20 ++++++++++++++++---- arch/arm/kernel/smp.c | 10 ++++++++++ 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 6462a721ebd4..a252c0bfacf5 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void) { return 1 << mpidr_hash.bits; } + +extern int platform_can_cpu_hotplug(void); + #endif diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 4fb074c446bf..d7c82df69243 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -15,6 +15,7 @@ #include #include #include +#include #include extern const unsigned char relocate_new_kernel[]; @@ -38,6 +39,14 @@ int machine_kexec_prepare(struct kimage *image) __be32 header; int i, err; + /* + * Validate that if the current HW supports SMP, then the SW supports + * and implements CPU hotplug for the current HW. If not, we won't be + * able to kexec reliably, so fail the prepare operation. + */ + if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug()) + return -EINVAL; + /* * No segment at default ATAGs address. try to locate * a dtb using magic. @@ -134,10 +143,13 @@ void machine_kexec(struct kimage *image) unsigned long reboot_code_buffer_phys; void *reboot_code_buffer; - if (num_online_cpus() > 1) { - pr_err("kexec: error: multiple CPUs still online\n"); - return; - } + /* + * This can only happen if machine_shutdown() failed to disable some + * CPU, and that can only happen if the checks in + * machine_kexec_prepare() were not correct. If this fails, we can't + * reliably kexec anyway, so BUG_ON is appropriate. + */ + BUG_ON(num_online_cpus() > 1); page_list = image->head & PAGE_MASK; diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index c2b4f8f0be9a..2dc19349eb19 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle) return -ENOSYS; } +int platform_can_cpu_hotplug(void) +{ +#ifdef CONFIG_HOTPLUG_CPU + if (smp_ops.cpu_kill) + return 1; +#endif + + return 0; +} + #ifdef CONFIG_HOTPLUG_CPU static void percpu_timer_stop(void);