workqueue: Use normal rcu

There is no need for sched_rcu. The undocumented reason why sched_rcu
is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by
abusing the fact that sched_rcu reader-side critical sections are also
protected by preempt- or irq-disabled regions.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Thomas Gleixner 2013-07-24 15:26:54 +02:00 committed by Alibek Omarov
parent 59c262358b
commit 91965a4c3d
1 changed file with 47 additions and 38 deletions
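
In sketch form, the difference between the two reader models (a minimal illustration with a hypothetical RCU-protected pointer gp; none of these names come from workqueue.c):

struct foo __rcu *gp;	/* hypothetical RCU-protected pointer */
struct foo *p;

/* sched-RCU: any preempt/irq-disabled region is implicitly a reader. */
local_irq_disable();
p = rcu_dereference_sched(gp);	/* waited out by synchronize_sched() */
/* ... use p ... */
local_irq_enable();

/* normal RCU: the read-side critical section is explicit. */
rcu_read_lock();
p = rcu_dereference(gp);	/* waited out by synchronize_rcu()/call_rcu() */
/* ... use p ... */
rcu_read_unlock();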

@@ -129,11 +129,11 @@ enum {
*
* PL: wq_pool_mutex protected.
*
- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
+ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
*
* WQ: wq->mutex protected.
*
- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
+ * WR: wq->mutex protected for writes. RCU protected for reads.
*
* MD: wq_mayday_lock protected.
*/
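
Under the relaxed PR/WR rule, readers need only a plain RCU read-side critical section while writers keep taking the mutex. A condensed sketch (the idr lookup mirrors what get_work_pool() does; error handling omitted):

/* reader: PR-marked data is safe to dereference under RCU */
rcu_read_lock();
pool = idr_find(&worker_pool_idr, pool_id);
/* if non-NULL, pool cannot be freed before rcu_read_unlock() */
rcu_read_unlock();

/* writer: modifications still require wq_pool_mutex */
mutex_lock(&wq_pool_mutex);
idr_remove(&worker_pool_idr, pool->id);
mutex_unlock(&wq_pool_mutex);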
@@ -178,7 +178,7 @@ struct worker_pool {
atomic_t nr_running ____cacheline_aligned_in_smp;
/*
- * Destruction of pool is sched-RCU protected to allow dereferences
+ * Destruction of pool is RCU protected to allow dereferences
* from get_work_pool().
*/
struct rcu_head rcu;
@@ -207,7 +207,7 @@ struct pool_workqueue {
/*
* Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue
- * itself is also sched-RCU protected so that the first pwq can be
+ * itself is also RCU protected so that the first pwq can be
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
@@ -331,14 +331,14 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
+ rcu_lockdep_assert(rcu_read_lock_held() || \
lockdep_is_held(&wq_pool_mutex), \
- "sched RCU or wq_pool_mutex should be held")
+ "RCU or wq_pool_mutex should be held")
#define assert_rcu_or_wq_mutex(wq) \
- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
+ rcu_lockdep_assert(rcu_read_lock_held() || \
lockdep_is_held(&wq->mutex), \
- "sched RCU or wq->mutex should be held")
+ "RCU or wq->mutex should be held")
#ifdef CONFIG_LOCKDEP
#define assert_manager_or_pool_lock(pool) \
@@ -360,7 +360,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
* @pool: iteration cursor
* @pi: integer used for iteration
*
- * This must be called either with wq_pool_mutex held or sched RCU read
+ * This must be called either with wq_pool_mutex held or RCU read
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
@@ -393,7 +393,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
* @pwq: iteration cursor
* @wq: the target workqueue
*
- * This must be called either with wq->mutex held or sched RCU read locked.
+ * This must be called either with wq->mutex held or RCU read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
@@ -548,7 +548,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
* @wq: the target workqueue
* @node: the node ID
*
- * This must be called either with pwq_lock held or sched RCU read locked.
+ * This must be called either with pwq_lock held or RCU read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
@@ -652,8 +652,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
- * access under sched-RCU read lock. As such, this function should be
- * called under wq_pool_mutex or with preemption disabled.
+ * access under RCU read lock. As such, this function should be
+ * called under wq_pool_mutex or inside of a rcu_read_lock() region.
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
@@ -1086,7 +1086,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
if (pwq) {
/*
- * As both pwqs and pools are sched-RCU protected, the
+ * As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
spin_lock_irq(&pwq->pool->lock);
@@ -1212,6 +1212,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
+ rcu_read_lock();
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
@@ -1250,10 +1251,12 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
+ rcu_read_unlock();
return 1;
}
spin_unlock(&pool->lock);
fail:
+ rcu_read_unlock();
local_irq_restore(*flags);
if (work_is_canceling(work))
return -ENOENT;
@@ -1334,6 +1337,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
+ rcu_read_lock();
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = raw_smp_processor_id();
@@ -1390,10 +1395,8 @@ retry:
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
- if (WARN_ON(!list_empty(&work->entry))) {
- spin_unlock(&pwq->pool->lock);
- return;
- }
+ if (WARN_ON(!list_empty(&work->entry)))
+ goto out;
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
@@ -1409,7 +1412,9 @@ retry:
insert_work(pwq, work, worklist, work_flags);
+out:
spin_unlock(&pwq->pool->lock);
+ rcu_read_unlock();
}
/**
@@ -2824,14 +2829,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
might_sleep();
- local_irq_disable();
+ rcu_read_lock();
pool = get_work_pool(work);
if (!pool) {
- local_irq_enable();
+ rcu_read_unlock();
return false;
}
- spin_lock(&pool->lock);
+ spin_lock_irq(&pool->lock);
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
@@ -2858,10 +2863,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
else
lock_map_acquire_read(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
+ rcu_read_unlock();
return true;
already_gone:
spin_unlock_irq(&pool->lock);
+ rcu_read_unlock();
return false;
}
@@ -3227,7 +3233,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
const char *delim = "";
int node, written = 0;
- rcu_read_lock_sched();
+ get_online_cpus();
+ rcu_read_lock();
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
@@ -3235,7 +3242,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
- rcu_read_unlock_sched();
+ rcu_read_unlock();
+ put_online_cpus();
return written;
}
@@ -3602,7 +3610,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
+ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
@@ -3649,8 +3657,8 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
- /* sched-RCU protected to allow dereferences from get_work_pool() */
- call_rcu_sched(&pool->rcu, rcu_free_pool);
+ /* RCU protected to allow dereferences from get_work_pool() */
+ call_rcu(&pool->rcu, rcu_free_pool);
}
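
The switch from call_rcu_sched() to call_rcu() pairs the free path with the new plain-RCU readers: the rcu_head embedded in struct worker_pool (see the hunk above) is queued, and after a grace period the callback recovers the pool via container_of() and frees it. A sketch of that callback shape (the exact body in this tree may differ):

static void rcu_free_pool(struct rcu_head *rcu)
{
	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);

	/* no rcu_read_lock() reader can still see @pool here */
	free_workqueue_attrs(pool->attrs);
	kfree(pool);
}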
/**
@@ -3763,7 +3771,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
- call_rcu_sched(&pwq->rcu, rcu_free_pwq);
+ call_rcu(&pwq->rcu, rcu_free_pwq);
/*
* If we're the last pwq going away, @wq is already dead and no one
@@ -4477,7 +4485,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
struct pool_workqueue *pwq;
bool ret;
- rcu_read_lock_sched();
+ rcu_read_lock();
+ preempt_disable();
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
@@ -4488,7 +4497,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
- rcu_read_unlock_sched();
+ preempt_enable();
+ rcu_read_unlock();
return ret;
}
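
Note the extra pair here: unlike rcu_read_lock_sched(), a plain rcu_read_lock() does not necessarily disable preemption (notably under CONFIG_PREEMPT_RCU), so an explicit preempt_disable()/preempt_enable() is added to keep the smp_processor_id() call above legal, roughly:

rcu_read_lock();	/* protects the pwq/pool lookups */
preempt_disable();	/* smp_processor_id() needs a stable CPU */
cpu = smp_processor_id();
/* ... per-cpu or node lookup ... */
preempt_enable();
rcu_read_unlock();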
@@ -4514,16 +4524,15 @@ unsigned int work_busy(struct work_struct *work)
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
- local_irq_save(flags);
+ rcu_read_lock();
pool = get_work_pool(work);
if (pool) {
- spin_lock(&pool->lock);
+ spin_lock_irqsave(&pool->lock, flags);
if (find_worker_executing_work(pool, work))
ret |= WORK_BUSY_RUNNING;
- spin_unlock(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, flags);
}
- local_irq_restore(flags);
+ rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(work_busy);
@@ -4971,16 +4980,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
- rcu_read_lock_sched();
+ rcu_read_lock();
for_each_pwq(pwq, wq) {
WARN_ON_ONCE(pwq->nr_active < 0);
if (pwq->nr_active) {
busy = true;
- rcu_read_unlock_sched();
+ rcu_read_unlock();
goto out_unlock;
}
}
- rcu_read_unlock_sched();
+ rcu_read_unlock();
}
out_unlock:
mutex_unlock(&wq_pool_mutex);