Use local irq lock instead of irq disable regions
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
91965a4c3d
commit
2d1e8fd236
|
@ -48,6 +48,7 @@
|
|||
#include <linux/nodemask.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/locallock.h>
|
||||
|
||||
#include "workqueue_internal.h"
|
||||
|
||||
|
@ -323,6 +324,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
|
|||
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
|
||||
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
|
||||
|
||||
static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
|
||||
|
||||
static int worker_thread(void *__worker);
|
||||
static void copy_workqueue_attrs(struct workqueue_attrs *to,
|
||||
const struct workqueue_attrs *from);
|
||||
|
@ -1089,9 +1092,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
|
|||
* As both pwqs and pools are RCU protected, the
|
||||
* following lock operations are safe.
|
||||
*/
|
||||
spin_lock_irq(&pwq->pool->lock);
|
||||
local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
|
||||
put_pwq(pwq);
|
||||
spin_unlock_irq(&pwq->pool->lock);
|
||||
local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1193,7 +1196,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
|
|||
struct worker_pool *pool;
|
||||
struct pool_workqueue *pwq;
|
||||
|
||||
local_irq_save(*flags);
|
||||
local_lock_irqsave(pendingb_lock, *flags);
|
||||
|
||||
/* try to steal the timer if it exists */
|
||||
if (is_dwork) {
|
||||
|
@ -1257,7 +1260,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
|
|||
spin_unlock(&pool->lock);
|
||||
fail:
|
||||
rcu_read_unlock();
|
||||
local_irq_restore(*flags);
|
||||
local_unlock_irqrestore(pendingb_lock, *flags);
|
||||
if (work_is_canceling(work))
|
||||
return -ENOENT;
|
||||
cpu_relax();
|
||||
|
@ -1329,7 +1332,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
|
|||
* queued or lose PENDING. Grabbing PENDING and queueing should
|
||||
* happen with IRQ disabled.
|
||||
*/
|
||||
WARN_ON_ONCE(!irqs_disabled());
|
||||
WARN_ON_ONCE_NONRT(!irqs_disabled());
|
||||
|
||||
debug_work_activate(work);
|
||||
|
||||
|
@ -1434,14 +1437,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
|
|||
bool ret = false;
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
local_lock_irqsave(pendingb_lock,flags);
|
||||
|
||||
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
|
||||
__queue_work(cpu, wq, work);
|
||||
ret = true;
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
local_unlock_irqrestore(pendingb_lock, flags);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(queue_work_on);
|
||||
|
@ -1508,14 +1511,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
|
|||
unsigned long flags;
|
||||
|
||||
/* read the comment in __queue_work() */
|
||||
local_irq_save(flags);
|
||||
local_lock_irqsave(pendingb_lock, flags);
|
||||
|
||||
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
|
||||
__queue_delayed_work(cpu, wq, dwork, delay);
|
||||
ret = true;
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
local_unlock_irqrestore(pendingb_lock, flags);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(queue_delayed_work_on);
|
||||
|
@ -1550,7 +1553,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
|
|||
|
||||
if (likely(ret >= 0)) {
|
||||
__queue_delayed_work(cpu, wq, dwork, delay);
|
||||
local_irq_restore(flags);
|
||||
local_unlock_irqrestore(pendingb_lock, flags);
|
||||
}
|
||||
|
||||
/* -ENOENT from try_to_grab_pending() becomes %true */
|
||||
|
@ -2954,7 +2957,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
|
|||
|
||||
/* tell other tasks trying to grab @work to back off */
|
||||
mark_work_canceling(work);
|
||||
local_irq_restore(flags);
|
||||
local_unlock_irqrestore(pendingb_lock, flags);
|
||||
|
||||
flush_work(work);
|
||||
clear_work_data(work);
|
||||
|
@ -3009,10 +3012,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
|
|||
*/
|
||||
bool flush_delayed_work(struct delayed_work *dwork)
|
||||
{
|
||||
local_irq_disable();
|
||||
local_lock_irq(pendingb_lock);
|
||||
if (del_timer_sync(&dwork->timer))
|
||||
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
|
||||
local_irq_enable();
|
||||
local_unlock_irq(pendingb_lock);
|
||||
return flush_work(&dwork->work);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_delayed_work);
|
||||
|
@ -3047,7 +3050,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
|
|||
|
||||
set_work_pool_and_clear_pending(&dwork->work,
|
||||
get_work_pool_id(&dwork->work));
|
||||
local_irq_restore(flags);
|
||||
local_unlock_irqrestore(pendingb_lock, flags);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(cancel_delayed_work);
|
||||
|
@ -4524,7 +4527,7 @@ unsigned int work_busy(struct work_struct *work)
|
|||
if (work_pending(work))
|
||||
ret |= WORK_BUSY_PENDING;
|
||||
|
||||
rcu_read_lock();
|
||||
rcu_read_lock();
|
||||
pool = get_work_pool(work);
|
||||
if (pool) {
|
||||
spin_lock_irqsave(&pool->lock, flags);
|
||||
|
|
Loading…
Reference in New Issue