write_lock migrate_disable pushdown to rt_write_lock

Pushdown of migrate_disable/enable from the write_*lock* macros to the
rt_write_*lock* API level.

general mapping of write_*lock* to mutexes:

write_*lock*
  `-> rt_write_*lock*
          `-> __rt_spin_lock (the sleeping "spinlock")
                 `-> rt_mutex

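(For reference, the RT rwlock behind this mapping looks roughly like the
sketch below; the field layout is abridged and from memory, so treat it as
illustrative only.)

typedef struct {
        struct rt_mutex         lock;        /* the sleeping lock doing the work */
        int                     read_depth;  /* readers may recurse, writers may not */
        /* debug/lockdep fields omitted */
} rwlock_t;
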
write_*lock*s are non-recursive, so there are two lock chains to consider:
 - write_trylock*/write_unlock
 - write_lock*/write_unlock
For both paths the migrate_disable/enable calls must be balanced (a minimal
caller-side sketch follows).

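A minimal caller-side sketch of that balance requirement (my_lock and
my_count are made-up names, purely for illustration):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(my_lock);
static int my_count;

static void bump_if_free(void)
{
        if (write_trylock(&my_lock)) {
                /* lock taken: migrate_disable() ran exactly once */
                my_count++;
                write_unlock(&my_lock);         /* migrate_enable() exactly once */
        }
        /* trylock failed: nothing was disabled, so nothing may be enabled */
}
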
write_trylock* mapping:

write_trylock_irqsave
                `-> rt_write_trylock_irqsave
write_trylock             \
          `-------->  rt_write_trylock
                       ret = rt_mutex_trylock
                              rt_mutex_fasttrylock
                               rt_mutex_cmpxchg
                       if (ret)
                            migrate_disable

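In code, the trylock path above boils down to roughly the following
(simplified sketch; the lockdep annotation of the real function is omitted):

int __lockfunc rt_write_trylock(rwlock_t *rwlock)
{
        int ret = rt_mutex_trylock(&rwlock->lock);

        if (ret)
                migrate_disable();      /* only when the lock was actually taken */

        return ret;
}
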
write_lock* mapping:

                  write_lock_irqsave
                                `-> rt_write_lock_irqsave
write_lock_irq -> write_lock ----.     \
                  write_lock_bh -+      \
                                 `-> rt_write_lock
                                      __rt_spin_lock()
                                       rt_spin_lock_fastlock()
                                        rt_mutex_cmpxchg()
                                     migrate_disable()

write_unlock* mapping:

                    write_unlock_irqrestore.
                    write_unlock_bh -------+
write_unlock_irq -> write_unlock ----------+
                                           `-> rt_write_unlock()
                                                __rt_spin_unlock()
                                                 rt_spin_lock_fastunlock()
                                                  rt_mutex_cmpxchg()
                                               migrate_enable()

So calls to migrate_disable/enable() are better placed at the rt_write_*
level of lock/trylock/unlock, as all of the write_*lock* API shares this
common path.

This approach to write_*_bh also eliminates the concerns raised with regard
to API imbalances (write_lock_bh -> write_unlock + local_bh_enable).

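With the pushdown in place the _bh pair is symmetric again; roughly, the
resulting macros look like this (sketch only, see the header hunks below
for the actual change):

#define write_lock_bh(lock)                     \
        do {                                    \
                local_bh_disable();             \
                rt_write_lock(lock);            \
        } while (0)

#define write_unlock_bh(lock)                   \
        do {                                    \
                rt_write_unlock(lock);          \
                local_bh_enable();              \
        } while (0)

migrate_disable() now happens inside rt_write_lock() and migrate_enable()
inside rt_write_unlock(), so BH and migration state are dropped in the
reverse order in which they were taken.
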
Tested-by: Carsten Emde <C.Emde@osadl.org>
Signed-off-by: Nicholas Mc Guire <der.herr@hofr.at>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

@@ -40,7 +40,6 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
 #define write_lock_irqsave(lock, flags) \
         do { \
                 typecheck(unsigned long, flags); \
-                migrate_disable(); \
                 flags = rt_write_lock_irqsave(lock); \
         } while (0)
@@ -61,14 +60,12 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
 #define write_lock(lock) \
         do { \
-                migrate_disable(); \
                 rt_write_lock(lock); \
         } while (0)
 #define write_lock_bh(lock) \
         do { \
                 local_bh_disable(); \
-                migrate_disable(); \
                 rt_write_lock(lock); \
         } while (0)
@@ -92,13 +89,11 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
 #define write_unlock(lock) \
         do { \
                 rt_write_unlock(lock); \
-                migrate_enable(); \
         } while (0)
 #define write_unlock_bh(lock) \
         do { \
                 rt_write_unlock(lock); \
-                migrate_enable(); \
                 local_bh_enable(); \
         } while (0)
@@ -117,7 +112,6 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
                 typecheck(unsigned long, flags); \
                 (void) flags; \
                 rt_write_unlock(lock); \
-                migrate_enable(); \
         } while (0)
 #endif

@@ -197,8 +197,6 @@ int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
         *flags = 0;
         ret = rt_write_trylock(rwlock);
-        if (ret)
-                migrate_disable();
         return ret;
 }
 EXPORT_SYMBOL(rt_write_trylock_irqsave);
@@ -232,6 +230,7 @@ EXPORT_SYMBOL(rt_read_trylock);
 void __lockfunc rt_write_lock(rwlock_t *rwlock)
 {
         rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+        migrate_disable();
         __rt_spin_lock(&rwlock->lock);
 }
 EXPORT_SYMBOL(rt_write_lock);
@@ -257,6 +256,7 @@ void __lockfunc rt_write_unlock(rwlock_t *rwlock)
         /* NOTE: we always pass in '1' for nested, for simplicity */
         rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
         __rt_spin_unlock(&rwlock->lock);
+        migrate_enable();
 }
 EXPORT_SYMBOL(rt_write_unlock);