diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index c52e7f0ee5f6..5c218aa3f3df 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -431,7 +431,7 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  */
 static int alpha_pmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int n0;
 	int ret;
@@ -483,7 +483,7 @@ static int alpha_pmu_add(struct perf_event *event, int flags)
  */
 static void alpha_pmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long irq_flags;
 	int j;
@@ -531,7 +531,7 @@ static void alpha_pmu_read(struct perf_event *event)
 static void alpha_pmu_stop(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		cpuc->idx_mask &= ~(1UL<<hwc->idx);
@@ -551,7 +551,7 @@ static void alpha_pmu_stop(struct perf_event *event, int flags)
 static void alpha_pmu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
 		return;
@@ -724,7 +724,7 @@ static int alpha_pmu_event_init(struct perf_event *event)
  */
 static void alpha_pmu_enable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (cpuc->enabled)
 		return;
@@ -750,7 +750,7 @@ static void alpha_pmu_enable(struct pmu *pmu)
 
 static void alpha_pmu_disable(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	if (!cpuc->enabled)
 		return;
@@ -814,8 +814,8 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	struct hw_perf_event *hwc;
 	int idx, j;
 
-	__get_cpu_var(irq_pmi_count)++;
-	cpuc = &__get_cpu_var(cpu_hw_events);
+	__this_cpu_inc(irq_pmi_count);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	/* Completely counting through the PMC's period to trigger a new PMC
 	 * overflow interrupt while in this interrupt routine is utterly
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index ee39cee8064c..643a9dcdf093 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -56,9 +56,9 @@ unsigned long est_cycle_freq;
 
 DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_irq_work_pending_flag()  __get_cpu_var(irq_work_pending) = 1
-#define test_irq_work_pending()      __get_cpu_var(irq_work_pending)
-#define clear_irq_work_pending()     __get_cpu_var(irq_work_pending) = 0
+#define set_irq_work_pending_flag()  __this_cpu_write(irq_work_pending, 1)
+#define test_irq_work_pending()      __this_cpu_read(irq_work_pending)
+#define clear_irq_work_pending()     __this_cpu_write(irq_work_pending, 0)
 
 void arch_irq_work_raise(void)
 {
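Note: the conversions above are mechanical. Taking the address of the lvalue returned by __get_cpu_var() becomes this_cpu_ptr(), an increment becomes __this_cpu_inc(), and plain assignments/reads of a scalar become __this_cpu_write()/__this_cpu_read(). The following is a minimal sketch of the idiom, using hypothetical per-cpu variables (example_state, example_count) that are not part of this patch; the old forms are shown in comments.

/*
 * Sketch only: example_state and example_count are hypothetical
 * per-cpu variables, not from this patch.  The __this_cpu_*() forms
 * assume the caller already runs with preemption disabled, as at the
 * converted sites above.
 */
#include <linux/percpu.h>

struct example_state {
	unsigned long idx_mask;
};

static DEFINE_PER_CPU(struct example_state, example_state);
static DEFINE_PER_CPU(unsigned int, example_count);

static void example_conversion(void)
{
	/* Old: struct example_state *s = &__get_cpu_var(example_state); */
	struct example_state *s = this_cpu_ptr(&example_state);

	/* Old: __get_cpu_var(example_count)++; */
	__this_cpu_inc(example_count);

	/* Old: __get_cpu_var(example_count) = 0; */
	__this_cpu_write(example_count, 0);

	s->idx_mask = 0;
}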
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 607559ab271f..e5d10ab8463b 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -56,10 +56,10 @@ static inline unsigned long cputime_to_jiffies(const cputime_t ct)
 
 static inline cputime_t cputime_to_scaled(const cputime_t ct)
 {
 	if (cpu_has_feature(CPU_FTR_SPURR) &&
-	    __get_cpu_var(cputime_last_delta))
+	    __this_cpu_read(cputime_last_delta))
 		return (__force u64) ct *
-			__get_cpu_var(cputime_scaled_last_delta) /
-			__get_cpu_var(cputime_last_delta);
+			__this_cpu_read(cputime_scaled_last_delta) /
+			__this_cpu_read(cputime_last_delta);
 	return ct;
 }
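The reads above use the non-preempt-safe __this_cpu_read(). For reference, the sketch below (example_delta is a hypothetical variable, not from this patch) contrasts the two accessor families.

/*
 * Sketch only: example_delta is hypothetical, not from this patch.
 */
#include <linux/types.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(u64, example_delta);

/*
 * __this_cpu_read() presumes preemption is already disabled; with
 * CONFIG_DEBUG_PREEMPT the kernel checks this at runtime.
 */
static u64 example_read_nopreempt(void)
{
	return __this_cpu_read(example_delta);
}

/*
 * this_cpu_read() is the preemption-safe variant: the access cannot be
 * torn by a migration to another CPU, so it may be called from
 * preemptible context.
 */
static u64 example_read_preemptible(void)
{
	return this_cpu_read(example_delta);
}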