diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 6444eaa30a2f..ff781b2eddec 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -77,7 +77,7 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v)
 	int processors, max_processors;
 	unsigned long purr = get_purr();
 
-	shared = (int)(get_lppaca()->shared_proc);
+	shared = (int)(local_paca->lppaca_ptr->shared_proc);
 
 	seq_printf(m, "system_active_processors=%d\n",
 		   (int)HvLpConfig_getSystemPhysicalProcessors());
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 6805efb2cb6d..affba7052fb6 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -86,7 +86,7 @@ extern unsigned long pci_dram_offset;
  */
 
 #ifdef CONFIG_PPC64
-#define IO_SET_SYNC_FLAG()	do { get_paca()->io_sync = 1; } while(0)
+#define IO_SET_SYNC_FLAG()	do { local_paca->io_sync = 1; } while(0)
 #else
 #define IO_SET_SYNC_FLAG()
 #endif
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index c6a5b1735666..fcd7b428ed0b 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -21,7 +21,18 @@
 #include
 
 register struct paca_struct *local_paca asm("r13");
+
+#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
+extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
+/*
+ * Add standard checks that preemption cannot occur when using get_paca():
+ * otherwise the paca_struct it points to may be the wrong one just after.
+ */
+#define get_paca()	((void) debug_smp_processor_id(), local_paca)
+#else
 #define get_paca()	local_paca
+#endif
+
 #define get_lppaca()	(get_paca()->lppaca_ptr)
 #define get_slb_shadow()	(get_paca()->slb_shadow_ptr)
 
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index 73dc8ba4010d..6b229626d3ff 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -28,7 +28,7 @@
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
-#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
+#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, local_paca->data_offset))
 
 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size)		\
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index d037f50580e2..19102bfc14ca 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -45,7 +45,7 @@ void generic_mach_cpu_die(void);
 #endif
 
 #ifdef CONFIG_PPC64
-#define raw_smp_processor_id()	(get_paca()->paca_index)
+#define raw_smp_processor_id()	(local_paca->paca_index)
 #define hard_smp_processor_id()	(get_paca()->hw_cpu_id)
 #else
 /* 32-bit */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 50a94eee4d92..51e2fd0d851c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -167,7 +167,7 @@ config SLUB_DEBUG_ON
 
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
-	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
+	depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64)
 	default y
 	help
 	  If you say Y here then the kernel will use a debug variant of the
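
The new get_paca() in the paca.h hunk relies on the comma operator: debug_smp_processor_id() is evaluated only for its preemption check, its return value is cast to void and discarded, and the whole expression still yields local_paca with its original pointer type, so existing callers compile unchanged. A minimal userspace sketch of that pattern, assuming invented stand-ins (the_paca, preempt_disabled, check_cpu_id) rather than real kernel symbols:

	#include <stdio.h>

	/* Userspace stand-in for the per-CPU paca pointer kept in r13. */
	static struct { int cpu_id; } the_paca = { .cpu_id = 0 };

	/* Stand-in for preempt_count(): non-zero means preemption is off. */
	static int preempt_disabled;

	/* Models debug_smp_processor_id(): warn if the caller is preemptible. */
	static unsigned int check_cpu_id(void)
	{
		if (!preempt_disabled)
			fprintf(stderr, "BUG: get_paca() used in preemptible code\n");
		return (unsigned int)the_paca.cpu_id;
	}

	/*
	 * The comma operator runs the check for its side effect, then yields
	 * the pointer, so the macro still expands to a plain pointer expression.
	 */
	#define get_paca() ((void) check_cpu_id(), &the_paca)

	int main(void)
	{
		preempt_disabled = 1;		/* "preemption off": silent */
		printf("cpu %d\n", get_paca()->cpu_id);

		preempt_disabled = 0;		/* preemptible: warning fires */
		printf("cpu %d\n", get_paca()->cpu_id);
		return 0;
	}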