diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index a276fae2740a..e85a5dfb1468 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -33,7 +33,6 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
 #define read_lock_irqsave(lock, flags)			\
 	do {						\
 		typecheck(unsigned long, flags);	\
-		migrate_disable();			\
 		flags = rt_read_lock_irqsave(lock);	\
 	} while (0)
 
@@ -45,14 +44,12 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
 
 #define read_lock(lock)					\
 	do {						\
-		migrate_disable();			\
 		rt_read_lock(lock);			\
 	} while (0)
 
 #define read_lock_bh(lock)				\
 	do {						\
 		local_bh_disable();			\
-		migrate_disable();			\
 		rt_read_lock(lock);			\
 	} while (0)
 
@@ -74,13 +71,11 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
 #define read_unlock(lock)				\
 	do {						\
 		rt_read_unlock(lock);			\
-		migrate_enable();			\
 	} while (0)
 
 #define read_unlock_bh(lock)				\
 	do {						\
 		rt_read_unlock(lock);			\
-		migrate_enable();			\
 		local_bh_enable();			\
 	} while (0)
 
@@ -104,7 +99,6 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
 		typecheck(unsigned long, flags);	\
 		(void) flags;				\
 		rt_read_unlock(lock);			\
-		migrate_enable();			\
 	} while (0)
 
 #define write_unlock_irqrestore(lock, flags)	\
diff --git a/kernel/locking/rt.c b/kernel/locking/rt.c
index cbeeda70cd00..bc7397fbe83d 100644
--- a/kernel/locking/rt.c
+++ b/kernel/locking/rt.c
@@ -211,17 +211,19 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
 	 * but not when read_depth == 0 which means that the lock is
 	 * write locked.
 	 */
-	migrate_disable();
-	if (rt_mutex_owner(lock) != current)
+	if (rt_mutex_owner(lock) != current) {
 		ret = rt_mutex_trylock(lock);
-	else if (!rwlock->read_depth)
+		if (ret)
+			migrate_disable();
+
+	} else if (!rwlock->read_depth) {
 		ret = 0;
+	}
 
 	if (ret) {
 		rwlock->read_depth++;
 		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
-	} else
-		migrate_enable();
+	}
 
 	return ret;
 }
@@ -244,8 +246,10 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock)
 	/*
 	 * recursive read locks succeed when current owns the lock
 	 */
-	if (rt_mutex_owner(lock) != current)
+	if (rt_mutex_owner(lock) != current) {
 		__rt_spin_lock(lock);
+		migrate_disable();
+	}
 
 	rwlock->read_depth++;
 }
@@ -265,8 +269,10 @@ void __lockfunc rt_read_unlock(rwlock_t *rwlock)
 	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
 
 	/* Release the lock only when read_depth is down to 0 */
-	if (--rwlock->read_depth == 0)
+	if (--rwlock->read_depth == 0) {
 		__rt_spin_unlock(&rwlock->lock);
+		migrate_enable();
+	}
 }
 EXPORT_SYMBOL(rt_read_unlock);
 