diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 1ad56a7b2f74..56f3236da829 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -173,7 +173,6 @@ struct hrtimer_clock_base {
  * struct hrtimer_cpu_base - the per cpu clock bases
  * @lock:		lock protecting the base and associated clock bases
  *			and timers
- * @lock_key:		the lock_class_key for use with lockdep
  * @clock_base:		array of clock bases for this cpu
  * @curr_timer:		the timer which is executing a callback right now
  * @expires_next:	absolute time of the next event which was scheduled
@@ -189,7 +188,6 @@ struct hrtimer_clock_base {
  */
 struct hrtimer_cpu_base {
 	spinlock_t			lock;
-	struct lock_class_key		lock_key;
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
 	struct list_head		cb_pending;
 #ifdef CONFIG_HIGH_RES_TIMERS
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 911e87d0440d..c642ef75069f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1424,7 +1424,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	int i;
 
 	spin_lock_init(&cpu_base->lock);
-	lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 		cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1465,16 +1464,16 @@ static void migrate_hrtimers(int cpu)
 	tick_cancel_sched_timer(cpu);
 
 	local_irq_disable();
-	double_spin_lock(&new_base->lock, &old_base->lock,
-			 smp_processor_id() < cpu);
+	spin_lock(&new_base->lock);
+	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		migrate_hrtimer_list(&old_base->clock_base[i],
 				     &new_base->clock_base[i]);
 	}
 
-	double_spin_unlock(&new_base->lock, &old_base->lock,
-			   smp_processor_id() < cpu);
+	spin_unlock(&old_base->lock);
+	spin_unlock(&new_base->lock);
 	local_irq_enable();
 	put_cpu_var(hrtimer_bases);
 }
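
Note (not part of the patch): the hunk in migrate_hrtimers() replaces the double_spin_lock()
helper with an explicit spin_lock()/spin_lock_nested() pair, which lets the per-base
lock_class_key be dropped entirely. Because the old and new hrtimer_cpu_base locks belong to
the same lock class, the inner acquisition is annotated with SINGLE_DEPTH_NESTING so lockdep
does not flag it as recursive locking. The following is a minimal sketch of that pattern,
assuming a simplified two-base structure; struct demo_base and merge_bases() are hypothetical
names used only for illustration, not code from this patch.

/* Illustrative sketch of the spin_lock_nested() pattern used above. */
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct demo_base {
	spinlock_t	lock;
	int		nr_items;
};

/* Move accounting from @old_base to @new_base while holding both locks. */
static void merge_bases(struct demo_base *new_base, struct demo_base *old_base)
{
	/* Outer lock is taken normally (subclass 0). */
	spin_lock(&new_base->lock);
	/*
	 * The inner lock is of the same lock class, so mark it as a
	 * deliberate single-depth nesting; otherwise lockdep would
	 * report a possible recursive-locking deadlock.
	 */
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	new_base->nr_items += old_base->nr_items;
	old_base->nr_items = 0;

	/* Release in reverse order of acquisition. */
	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
}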