work-simple: Simple work queue implementation

Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks from
IRQ context. The callbacks are executed in kthread context.

Based on wait-simple.

Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Daniel Wagner, 2014-07-11 15:26:11 +02:00; committed by Alibek Omarov
parent d9859c5f3a
commit 7dafcfee7b
3 changed files with 197 additions and 1 deletion

include/linux/work-simple.h Normal file

@@ -0,0 +1,24 @@
#ifndef _LINUX_SWORK_H
#define _LINUX_SWORK_H

#include <linux/list.h>

struct swork_event {
        struct list_head item;
        unsigned long flags;
        void (*func)(struct swork_event *);
};

static inline void INIT_SWORK(struct swork_event *event,
                              void (*func)(struct swork_event *))
{
        event->flags = 0;
        event->func = func;
}

bool swork_queue(struct swork_event *sev);

int swork_get(void);
void swork_put(void);

#endif /* _LINUX_SWORK_H */
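
A minimal usage sketch (not part of the patch): a driver takes a
reference on the worker during init, queues an event from its hardirq
handler, and the callback runs later in the kswork kthread. The names
my_event, my_work_cb, my_irq_handler and the module boilerplate are
made up for illustration.

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/work-simple.h>

static struct swork_event my_event;

/* Runs in kthread context, so it may sleep. */
static void my_work_cb(struct swork_event *sev)
{
        pr_info("deferred work running in kswork\n");
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
        /* Safe from hardirq context, also on PREEMPT_RT_FULL. */
        swork_queue(&my_event);
        return IRQ_HANDLED;
}

static int __init my_init(void)
{
        int ret;

        /* Take a reference on the global worker; the first call starts it. */
        ret = swork_get();
        if (ret)
                return ret;

        INIT_SWORK(&my_event, my_work_cb);
        /* request_irq(..., my_irq_handler, ...) would go here. */
        return 0;
}

static void __exit my_exit(void)
{
        /* free_irq() first, so no further events can be queued. */
        swork_put();
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");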

kernel/sched/Makefile

@@ -13,7 +13,7 @@ endif
 obj-y += core.o proc.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o wait-simple.o completion.o
+obj-y += wait.o wait-simple.o work-simple.o completion.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o

kernel/sched/work-simple.c Normal file

@@ -0,0 +1,172 @@
/*
 * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
 *
 * Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks from
 * IRQ context. The callbacks are executed in kthread context.
 */
#include <linux/wait-simple.h>
#include <linux/work-simple.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
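
/* Set in swork_event::flags while an event is queued but not yet run. */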
#define SWORK_EVENT_PENDING (1 << 0)

static DEFINE_MUTEX(worker_mutex);
static struct sworker *glob_worker;

struct sworker {
        struct list_head events;
        struct swait_head wq;
        raw_spinlock_t lock;
        struct task_struct *task;
        int refs;
};
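
/*
 * Condition for the worker's swait: there is something to do, or the
 * thread is being told to stop.
 */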
static bool swork_readable(struct sworker *worker)
{
        bool r;

        if (kthread_should_stop())
                return true;

        raw_spin_lock_irq(&worker->lock);
        r = !list_empty(&worker->events);
        raw_spin_unlock_irq(&worker->lock);

        return r;
}
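
/*
 * Worker main loop: sleep until woken, then drain the event list. The
 * lock is dropped around each callback, so the handler may sleep.
 */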
static int swork_kthread(void *arg)
{
        struct sworker *worker = arg;

        for (;;) {
                swait_event_interruptible(worker->wq,
                                          swork_readable(worker));
                if (kthread_should_stop())
                        break;

                raw_spin_lock_irq(&worker->lock);
                while (!list_empty(&worker->events)) {
                        struct swork_event *sev;

                        sev = list_first_entry(&worker->events,
                                               struct swork_event, item);
                        list_del(&sev->item);
                        raw_spin_unlock_irq(&worker->lock);

                        WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
                                                         &sev->flags));
                        sev->func(sev);
                        raw_spin_lock_irq(&worker->lock);
                }
                raw_spin_unlock_irq(&worker->lock);
        }

        return 0;
}
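
/* Allocate a worker and spawn its "kswork" thread. */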
static struct sworker *swork_create(void)
{
        struct sworker *worker;

        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
        if (!worker)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&worker->events);
        raw_spin_lock_init(&worker->lock);
        init_swait_head(&worker->wq);

        worker->task = kthread_run(swork_kthread, worker, "kswork");
        if (IS_ERR(worker->task)) {
                kfree(worker);
                return ERR_PTR(-ENOMEM);
        }

        return worker;
}
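
/* Stop the worker thread; warns if events are still pending. */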
static void swork_destroy(struct sworker *worker)
{
        kthread_stop(worker->task);

        WARN_ON(!list_empty(&worker->events));
        kfree(worker);
}

/**
 * swork_queue - queue swork
 *
 * Returns %false if @sev was already on a queue, %true otherwise.
 *
 * The work is queued and processed on a random CPU.
 */
bool swork_queue(struct swork_event *sev)
{
        unsigned long flags;

        if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
                return false;

        raw_spin_lock_irqsave(&glob_worker->lock, flags);
        list_add_tail(&sev->item, &glob_worker->events);
        raw_spin_unlock_irqrestore(&glob_worker->lock, flags);

        swait_wake(&glob_worker->wq);

        return true;
}
EXPORT_SYMBOL_GPL(swork_queue);

/**
 * swork_get - get an instance of the sworker
 *
 * Returns a negative error code if the initialization of the worker
 * failed, %0 otherwise.
 */
int swork_get(void)
{
        struct sworker *worker;

        mutex_lock(&worker_mutex);
        if (!glob_worker) {
                worker = swork_create();
                if (IS_ERR(worker)) {
                        mutex_unlock(&worker_mutex);
                        return -ENOMEM;
                }

                glob_worker = worker;
        }

        glob_worker->refs++;
        mutex_unlock(&worker_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(swork_get);

/**
 * swork_put - puts an instance of the sworker
 *
 * Will destroy the sworker thread. This function must not be called until all
 * queued events have been completed.
 */
void swork_put(void)
{
        mutex_lock(&worker_mutex);

        glob_worker->refs--;
        if (glob_worker->refs > 0)
                goto out;

        swork_destroy(glob_worker);
        glob_worker = NULL;
out:
        mutex_unlock(&worker_mutex);
}
EXPORT_SYMBOL_GPL(swork_put);