read_lock migrate_disable pushdown to rt_read_lock

Pushdown of migrate_disable/enable from the read_*lock* to the
rt_read_*lock* API level.

general mapping to mutexes:

read_*lock*
  `-> rt_read_*lock*
          `-> __spin_lock (the sleeping spin locks)
                 `-> rt_mutex

The real read_lock* mapping:

          read_lock_irqsave -.
read_lock_irq                `-> rt_read_lock_irqsave()
       `->read_lock ---------.       \
          read_lock_bh ------+        \
                             `--> rt_read_lock()
                                   if (rt_mutex_owner(lock) != current){
                                           `-> __rt_spin_lock()
                                                rt_spin_lock_fastlock()
                                                       `->rt_mutex_cmpxchg()
                                    migrate_disable()
                                   }
                                   rwlock->read_depth++;

read_trylock mapping:

read_trylock
          `-> rt_read_trylock
               if (rt_mutex_owner(lock) != current){
                                              `-> rt_mutex_trylock()
                                                   rt_mutex_fasttrylock()
                                                    rt_mutex_cmpxchg()
                migrate_disable()
               }
               rwlock->read_depth++;
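
A minimal caller sketch of the trylock path (my_rwlock and try_read_stats
are hypothetical placeholders, not part of this patch): migrate_disable()
is only taken when the trylock actually succeeds, so a failed attempt
leaves nothing to undo:

static DEFINE_RWLOCK(my_rwlock);

static int try_read_stats(void)
{
        if (!read_trylock(&my_rwlock))
                return 0;          /* neither the lock nor migrate_disable() was taken */
        /* ... read-side critical section ... */
        read_unlock(&my_rwlock);   /* drops the lock and re-enables migration */
        return 1;
}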

read_unlock* mapping:

read_unlock_bh --------+
read_unlock_irq -------+
read_unlock_irqrestore +
read_unlock -----------+
                       `-> rt_read_unlock()
                            if(--rwlock->read_depth==0){
                                      `-> __rt_spin_unlock()
                                           rt_spin_lock_fastunlock()
                                                        `-> rt_mutex_cmpxchg()
                              migrate_enable()
                            }

Calls to migrate_disable/enable() are therefore better placed at the
rt_read_* level of lock/trylock/unlock, as all of the read_*lock* API
funnels through that common path. The rt_read_* lock/trylock/unlock
functions already record the nesting level in rwlock->read_depth, so
migrate_disable/enable can be pushed down to that level and conditioned
on the read_depth transition: 0 to 1 -> migrate_disable(), 1 to 0 ->
migrate_enable(). This eliminates the nested migrate_disable/enable calls
that recursive readers incurred when the calls were made at the
read_*lock* level.
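
As a minimal sketch (reusing the hypothetical my_rwlock placeholder from
the trylock example above), a recursive reader now pays for
migrate_disable/enable exactly once, on the outermost lock/unlock; the
inner levels only touch read_depth:

static void inner_reader(void)
{
        read_lock(&my_rwlock);    /* owner == current: only read_depth 1 -> 2 */
        /* ... */
        read_unlock(&my_rwlock);  /* read_depth 2 -> 1: lock stays held, migration stays disabled */
}

static void outer_reader(void)
{
        read_lock(&my_rwlock);    /* owner != current: takes the lock, migrate_disable(), read_depth 0 -> 1 */
        inner_reader();
        read_unlock(&my_rwlock);  /* read_depth 1 -> 0: releases the lock, migrate_enable() */
}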

The approach also eliminates the concerns raised about API imbalances in
the read_*_bh variants (read_lock_bh -> read_unlock + local_bh_enable), as
shown in the sketch below.
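
A minimal sketch of the pattern behind that concern (my_rwlock is the same
hypothetical placeholder lock as above, not part of this patch): with
migrate_disable/enable inside rt_read_lock()/rt_read_unlock(), the pair
stays balanced even when the unlock side is open-coded as read_unlock()
plus local_bh_enable():

static void bh_reader(void)
{
        read_lock_bh(&my_rwlock);  /* local_bh_disable() + rt_read_lock(), which disables migration */
        /* ... read-side critical section ... */
        read_unlock(&my_rwlock);   /* rt_read_unlock() re-enables migration */
        local_bh_enable();         /* softirqs re-enabled by hand */
}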

Tested-by: Carsten Emde <C.Emde@osadl.org>
Signed-off-by: Nicholas Mc Guire <der.herr@hofr.at>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

@@ -33,7 +33,6 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
 #define read_lock_irqsave(lock, flags)                 \
         do {                                           \
                 typecheck(unsigned long, flags);       \
-                migrate_disable();                     \
                 flags = rt_read_lock_irqsave(lock);    \
         } while (0)
@@ -45,14 +44,12 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
 #define read_lock(lock)                                \
         do {                                           \
-                migrate_disable();                     \
                 rt_read_lock(lock);                    \
         } while (0)
 
 #define read_lock_bh(lock)                             \
         do {                                           \
                 local_bh_disable();                    \
-                migrate_disable();                     \
                 rt_read_lock(lock);                    \
         } while (0)
@@ -74,13 +71,11 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
 #define read_unlock(lock)                              \
         do {                                           \
                 rt_read_unlock(lock);                  \
-                migrate_enable();                      \
         } while (0)
 
 #define read_unlock_bh(lock)                           \
         do {                                           \
                 rt_read_unlock(lock);                  \
-                migrate_enable();                      \
                 local_bh_enable();                     \
         } while (0)
@@ -104,7 +99,6 @@ extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key
                 typecheck(unsigned long, flags);       \
                 (void) flags;                          \
                 rt_read_unlock(lock);                  \
-                migrate_enable();                      \
         } while (0)
 
 #define write_unlock_irqrestore(lock, flags)           \

@@ -211,17 +211,19 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
          * but not when read_depth == 0 which means that the lock is
          * write locked.
          */
-        migrate_disable();
-        if (rt_mutex_owner(lock) != current)
+        if (rt_mutex_owner(lock) != current) {
                 ret = rt_mutex_trylock(lock);
-        else if (!rwlock->read_depth)
+                if (ret)
+                        migrate_disable();
+
+        } else if (!rwlock->read_depth) {
                 ret = 0;
+        }
 
         if (ret) {
                 rwlock->read_depth++;
                 rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
-        } else
-                migrate_enable();
+        }
 
         return ret;
 }
@@ -244,8 +246,10 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock)
         /*
          * recursive read locks succeed when current owns the lock
          */
-        if (rt_mutex_owner(lock) != current)
+        if (rt_mutex_owner(lock) != current) {
                 __rt_spin_lock(lock);
+                migrate_disable();
+        }
 
         rwlock->read_depth++;
 }
@@ -265,8 +269,10 @@ void __lockfunc rt_read_unlock(rwlock_t *rwlock)
         rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
 
         /* Release the lock only when read_depth is down to 0 */
-        if (--rwlock->read_depth == 0)
+        if (--rwlock->read_depth == 0) {
                 __rt_spin_unlock(&rwlock->lock);
+                migrate_enable();
+        }
 }
 
 EXPORT_SYMBOL(rt_read_unlock);