irq_work: Convert flags to atomic_t
We need to convert flags to atomic_t in order to later fix an ordering
issue on atomic_cmpxchg() failure. This will allow us to use
atomic_fetch_or().

Also clarify the nature of those flags.

[ mingo: Converted two more usage sites the original patch missed. ]

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20191108160858.31665-2-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 153bedbac2
parent 31f4f5b495
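The ordering issue named in the message is the reason for the conversion: once flags is an atomic_t, the claim path can later be rewritten around atomic_fetch_or(), which is a fully ordered read-modify-write even when the PENDING bit turns out to be already set, whereas a cmpxchg() loop provides no ordering on the failure path. A minimal sketch of that later direction, assuming the atomic_t conversion below; the function name is hypothetical and this rewrite is not part of this patch:

/*
 * Illustrative sketch of a follow-up irq_work_claim() built on
 * atomic_fetch_or(); not part of this commit.
 */
static bool irq_work_claim_sketch(struct irq_work *work)
{
	int oflags;

	/* Fully ordered RMW, even if the work was already claimed. */
	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED, &work->flags);
	/*
	 * If the work is already pending, whoever queued it owns the
	 * pending -> run transition; nothing more to do here.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;

	return true;
}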
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -22,7 +22,7 @@
 #define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)
 
 struct irq_work {
-	unsigned long flags;
+	atomic_t flags;
 	struct llist_node llnode;
 	void (*func)(struct irq_work *);
 };
@@ -30,11 +30,15 @@ struct irq_work {
 static inline
 void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 {
-	work->flags = 0;
+	atomic_set(&work->flags, 0);
 	work->func = func;
 }
 
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {	\
+	.flags = ATOMIC_INIT(0),				\
+	.func = (_f)						\
+}
 
 bool irq_work_queue(struct irq_work *work);
 bool irq_work_queue_on(struct irq_work *work, int cpu);
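For users of the API only the initializers change shape: DEFINE_IRQ_WORK() now needs ATOMIC_INIT() for the flags field and init_irq_work() zeroes it with atomic_set(). A minimal usage sketch against the converted header; the callback and work item names are made up for illustration and are not from this patch:

/* Hypothetical example of the API after this conversion. */
#include <linux/irq_work.h>
#include <linux/printk.h>

static void example_irq_work_fn(struct irq_work *work)
{
	pr_info("irq_work ran in hard interrupt context\n");
}

/* The static initializer now sets .flags = ATOMIC_INIT(0). */
static DEFINE_IRQ_WORK(example_irq_work, example_irq_work_fn);

static void example_kick(void)
{
	/* Safe from IRQ/NMI context; returns false if already pending. */
	irq_work_queue(&example_irq_work);
}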
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -289,7 +289,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 
 	if (in_nmi()) {
 		work = this_cpu_ptr(&up_read_work);
-		if (work->irq_work.flags & IRQ_WORK_BUSY)
+		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
 			/* cannot queue more up_read, fallback */
 			irq_work_busy = true;
 	}
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -29,16 +29,16 @@ static DEFINE_PER_CPU(struct llist_head, lazy_list);
  */
 static bool irq_work_claim(struct irq_work *work)
 {
-	unsigned long flags, oflags, nflags;
+	int flags, oflags, nflags;
 
 	/*
 	 * Start with our best wish as a premise but only trust any
 	 * flag value after cmpxchg() result.
 	 */
-	flags = work->flags & ~IRQ_WORK_PENDING;
+	flags = atomic_read(&work->flags) & ~IRQ_WORK_PENDING;
 	for (;;) {
 		nflags = flags | IRQ_WORK_CLAIMED;
-		oflags = cmpxchg(&work->flags, flags, nflags);
+		oflags = atomic_cmpxchg(&work->flags, flags, nflags);
 		if (oflags == flags)
 			break;
 		if (oflags & IRQ_WORK_PENDING)
@@ -61,7 +61,7 @@ void __weak arch_irq_work_raise(void)
 static void __irq_work_queue_local(struct irq_work *work)
 {
 	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
+	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
 		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
 		    tick_nohz_tick_stopped())
 			arch_irq_work_raise();
@@ -143,7 +143,7 @@ static void irq_work_run_list(struct llist_head *list)
 {
 	struct irq_work *work, *tmp;
 	struct llist_node *llnode;
-	unsigned long flags;
+	int flags;
 
 	BUG_ON(!irqs_disabled());
 
@@ -159,15 +159,15 @@ static void irq_work_run_list(struct llist_head *list)
 		 * to claim that work don't rely on us to handle their data
 		 * while we are in the middle of the func.
 		 */
-		flags = work->flags & ~IRQ_WORK_PENDING;
-		xchg(&work->flags, flags);
+		flags = atomic_read(&work->flags) & ~IRQ_WORK_PENDING;
+		atomic_xchg(&work->flags, flags);
 
 		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+		(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
 	}
 }
 
@@ -199,7 +199,7 @@ void irq_work_sync(struct irq_work *work)
 {
 	lockdep_assert_irqs_enabled();
 
-	while (work->flags & IRQ_WORK_BUSY)
+	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
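The comments in the irq_work_run_list() hunks above describe the flag protocol that every accessor in this file has to respect; the conversion keeps that protocol intact and only swaps the accessors. An illustrative summary of the lifecycle (not text from the patch):

/*
 * Illustrative flag lifecycle, as exercised by irq_work_run_list():
 *
 *   0                      work is free, irq_work_claim() may take it
 *   IRQ_WORK_PENDING|BUSY  claimed and queued (IRQ_WORK_CLAIMED)
 *   IRQ_WORK_BUSY          PENDING dropped just before func() runs, so the
 *                          work can be re-queued from its own callback
 *   0                      BUSY cleared by the final atomic_cmpxchg(),
 *                          unless someone re-claimed the work meanwhile
 */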
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2961,7 +2961,7 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
 
 static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
 	.func = wake_up_klogd_work_func,
-	.flags = IRQ_WORK_LAZY,
+	.flags = ATOMIC_INIT(IRQ_WORK_LAZY),
 };
 
 void wake_up_klogd(void)
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -660,7 +660,7 @@ BPF_CALL_1(bpf_send_signal, u32, sig)
 		return -EINVAL;
 
 	work = this_cpu_ptr(&send_signal_work);
-	if (work->irq_work.flags & IRQ_WORK_BUSY)
+	if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
 		return -EBUSY;
 
 	/* Add the current task, which is the target of sending signal,