workqueue: Sync with upstream

This is an all-in-one patch reverting the following commits:
  workqueue: Don't assume that the callback has interrupts disabled
  sched/swait: Add swait_event_lock_irq()
  workqueue: Use swait for wq_manager_wait
  workqueue: Convert the locks to raw type

and introducing the following commits from upstream:
  workqueue: Use rcuwait for wq_manager_wait
  workqueue: Convert the pool::lock and wq_mayday_lock to raw_spinlock_t

as a replacement.
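
For illustration only, here is a minimal sketch of the rcuwait plus
raw_spinlock_t pattern the new code relies on. The demo_* names are
hypothetical and not part of the patch; it assumes the two-argument
rcuwait_wait_event() used in this tree, which always sleeps in
TASK_UNINTERRUPTIBLE. As in wq_manager_inactive(), the condition helper
takes the raw spinlock itself, so a successful wait returns with the
lock held:

#include <linux/rcuwait.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static struct rcuwait demo_wait = __RCUWAIT_INITIALIZER(demo_wait);
static bool demo_busy;

/*
 * Condition helper: on success it returns with demo_lock held and
 * interrupts disabled, mirroring wq_manager_inactive().
 */
static bool demo_idle(void)
{
	raw_spin_lock_irq(&demo_lock);
	if (demo_busy) {
		raw_spin_unlock_irq(&demo_lock);
		return false;
	}
	return true;
}

static void demo_claim(void)
{
	/* Sleep until demo_idle() succeeds; the lock is then already held. */
	rcuwait_wait_event(&demo_wait, demo_idle());
	demo_busy = true;
	raw_spin_unlock_irq(&demo_lock);
}

static void demo_release(void)
{
	raw_spin_lock_irq(&demo_lock);
	demo_busy = false;
	raw_spin_unlock_irq(&demo_lock);
	/* Wake a possible waiter; no further locking is required here. */
	rcuwait_wake_up(&demo_wait);
}

Because the condition is re-evaluated with the raw lock held, the wake-up
side only has to clear the flag under the lock and call rcuwait_wake_up(),
which is usable from contexts where a sleeping lock would not be.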

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Authored by Sebastian Andrzej Siewior on 2020-06-10 12:59:18 +02:00; committed by Alibek Omarov
parent f958e52689
commit 58d58190a1
2 changed files with 19 additions and 23 deletions

include/linux/swait.h

@@ -299,18 +299,4 @@ do { \
 	__ret; \
 })
 
-#define __swait_event_lock_irq(wq, condition, lock, cmd) \
-	___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
-		       raw_spin_unlock_irq(&lock); \
-		       cmd; \
-		       schedule(); \
-		       raw_spin_lock_irq(&lock))
-
-#define swait_event_lock_irq(wq_head, condition, lock) \
-	do { \
-		if (condition) \
-			break; \
-		__swait_event_lock_irq(wq_head, condition, lock, ); \
-	} while (0)
-
 #endif /* _LINUX_SWAIT_H */

kernel/workqueue.c

@@ -51,7 +51,6 @@
 #include <linux/sched/isolation.h>
 #include <linux/nmi.h>
 #include <linux/kvm_para.h>
-#include <linux/swait.h>
 
 #include "workqueue_internal.h"
@@ -303,7 +302,8 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
 static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
-static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+/* wait for manager to go away */
+static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
 
 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
@@ -1627,11 +1627,9 @@ EXPORT_SYMBOL_GPL(queue_work_node);
 void delayed_work_timer_fn(struct timer_list *t)
 {
 	struct delayed_work *dwork = from_timer(dwork, t, timer);
-	unsigned long flags;
 
-	local_irq_save(flags);
+	/* should have been called from irqsafe timer with irq already off */
 	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(delayed_work_timer_fn);
@@ -2156,7 +2154,7 @@ static bool manage_workers(struct worker *worker)
 	pool->manager = NULL;
 	pool->flags &= ~POOL_MANAGER_ACTIVE;
-	swake_up_one(&wq_manager_wait);
+	rcuwait_wake_up(&manager_wait);
 	return true;
 }
@@ -3520,6 +3518,18 @@ static void rcu_free_pool(struct rcu_head *rcu)
 	kfree(pool);
 }
 
+/* This returns with the lock held on success (pool manager is inactive). */
+static bool wq_manager_inactive(struct worker_pool *pool)
+{
+	raw_spin_lock_irq(&pool->lock);
+
+	if (pool->flags & POOL_MANAGER_ACTIVE) {
+		raw_spin_unlock_irq(&pool->lock);
+		return false;
+	}
+	return true;
+}
+
 /**
  * put_unbound_pool - put a worker_pool
  * @pool: worker_pool to put
@@ -3555,10 +3565,10 @@ static void put_unbound_pool(struct worker_pool *pool)
 	 * Become the manager and destroy all workers. This prevents
 	 * @pool's workers from blocking on attach_mutex. We're the last
 	 * manager and @pool gets freed with the flag set.
+	 * Because of how wq_manager_inactive() works, we will hold the
+	 * spinlock after a successful wait.
 	 */
-	raw_spin_lock_irq(&pool->lock);
-	swait_event_lock_irq(wq_manager_wait,
-			     !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+	rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool));
 	pool->flags |= POOL_MANAGER_ACTIVE;
 
 	while ((worker = first_idle_worker(pool)))