printk-rb: add prb locking functions

Add processor-reentrant spin locking functions. These allow
restricting the number of contexts that can be inside a critical
section at once to two: the lock owner and an NMI that interrupts it
on the same processor. This can simplify implementing code that must
also support NMI interruptions.

    prb_lock();

    /*
     * This code is synchronized with all contexts
     * except an NMI on the same processor.
     */

    prb_unlock();

In order to support printk's emergency messages, a
processor-reentrant spin lock will be used to control raw access to
the emergency console. However, it must be the same
processor-reentrant spin lock as the one used by the ring buffer,
otherwise a deadlock can occur:

    CPU1: printk lock -> emergency -> serial lock
    CPU2: serial lock -> printk lock

By making the processor-reentrant locking implementation available
externally, printk can use the same atomic_t for the ring buffer as
for the emergency console and thus avoid the above deadlock.
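
As a rough sketch (not part of this patch; the lock name
printk_cpulock and both functions below are hypothetical), sharing
one prb_cpulock means the emergency path's serial access always
nests inside the same lock that serializes ring buffer writes, so
the two-lock ordering above cannot arise:

    DECLARE_STATIC_PRINTKRB_CPULOCK(printk_cpulock);

    void ringbuffer_write_record(void)
    {
            unsigned int cpu_store;

            prb_lock(&printk_cpulock, &cpu_store);
            /* append the record to the ring buffer */
            prb_unlock(&printk_cpulock, cpu_store);
    }

    void emergency_console_write(void)
    {
            unsigned int cpu_store;

            prb_lock(&printk_cpulock, &cpu_store);
            /* raw console output; the serial lock nests inside prb_lock */
            prb_unlock(&printk_cpulock, cpu_store);
    }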

Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
commit 76c17264c8 (parent 50f14b884f)
Author:     John Ogness
AuthorDate: 2019-02-12 15:29:40 +01:00
Committer:  Alibek Omarov

3 changed files with 102 additions and 1 deletion

include/linux/printk_ringbuffer.h (new file)

@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PRINTK_RINGBUFFER_H
#define _LINUX_PRINTK_RINGBUFFER_H

#include <linux/atomic.h>
#include <linux/percpu.h>

struct prb_cpulock {
        atomic_t owner;
        unsigned long __percpu *irqflags;
};

#define DECLARE_STATIC_PRINTKRB_CPULOCK(name)                          \
static DEFINE_PER_CPU(unsigned long, _##name##_percpu_irqflags);       \
static struct prb_cpulock name = {                                     \
        .owner = ATOMIC_INIT(-1),                                      \
        .irqflags = &_##name##_percpu_irqflags,                        \
}

/* utility functions */
void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store);
void prb_unlock(struct prb_cpulock *cpu_lock, unsigned int cpu_store);

#endif /* _LINUX_PRINTK_RINGBUFFER_H */
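
For illustration only (the name test_cpulock is just an example),
DECLARE_STATIC_PRINTKRB_CPULOCK(test_cpulock); expands to a per-CPU
slot for saved IRQ flags plus a lock whose owner field starts at -1,
the "unowned" value checked by the locking code below:

    static DEFINE_PER_CPU(unsigned long, _test_cpulock_percpu_irqflags);
    static struct prb_cpulock test_cpulock = {
            .owner = ATOMIC_INIT(-1),
            .irqflags = &_test_cpulock_percpu_irqflags,
    };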

lib/Makefile

@@ -26,7 +26,7 @@ endif
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
         rbtree.o radix-tree.o timerqueue.o xarray.o \
-        idr.o extable.o \
+        idr.o extable.o printk_ringbuffer.o \
         sha1.o chacha.o irq_regs.o argv_split.o \
         flex_proportions.o ratelimit.o show_mem.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \

lib/printk_ringbuffer.c (new file)

@@ -0,0 +1,77 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/printk_ringbuffer.h>

static bool __prb_trylock(struct prb_cpulock *cpu_lock,
                          unsigned int *cpu_store)
{
        unsigned long *flags;
        unsigned int cpu;

        cpu = get_cpu();

        *cpu_store = atomic_read(&cpu_lock->owner);
        /* memory barrier to ensure the current lock owner is visible */
        smp_rmb();
        if (*cpu_store == -1) {
                flags = per_cpu_ptr(cpu_lock->irqflags, cpu);
                local_irq_save(*flags);
                if (atomic_try_cmpxchg_acquire(&cpu_lock->owner,
                                               cpu_store, cpu)) {
                        return true;
                }
                local_irq_restore(*flags);
        } else if (*cpu_store == cpu) {
                return true;
        }

        put_cpu();
        return false;
}
/*
 * prb_lock: Perform a processor-reentrant spin lock.
 * @cpu_lock: A pointer to the lock object.
 * @cpu_store: A "flags" pointer to store lock status information.
 *
 * If no processor has the lock, the calling processor takes the lock and
 * becomes the owner. If the calling processor is already the owner of the
 * lock, this function succeeds immediately. If the lock is held by another
 * processor, this function spins until the calling processor becomes the
 * owner.
 *
 * It is safe to call this function from any context and state.
 */
void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store)
{
        for (;;) {
                if (__prb_trylock(cpu_lock, cpu_store))
                        break;
                cpu_relax();
        }
}
/*
 * prb_unlock: Perform a processor-reentrant spin unlock.
 * @cpu_lock: A pointer to the lock object.
 * @cpu_store: A "flags" object storing lock status information.
 *
 * Release the lock. The calling processor must be the owner of the lock.
 *
 * It is safe to call this function from any context and state.
 */
void prb_unlock(struct prb_cpulock *cpu_lock, unsigned int cpu_store)
{
        unsigned long *flags;
        unsigned int cpu;

        cpu = atomic_read(&cpu_lock->owner);
        atomic_set_release(&cpu_lock->owner, cpu_store);

        if (cpu_store == -1) {
                flags = per_cpu_ptr(cpu_lock->irqflags, cpu);
                local_irq_restore(*flags);
        }

        put_cpu();
}
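
A sketch of the re-entrancy protocol (not from the patch;
printk_cpulock is the hypothetical lock from the commit-message
example above), showing what the cpu_store value carries across a
nested acquire from NMI context on the same CPU:

    unsigned int outer, inner;

    prb_lock(&printk_cpulock, &outer);   /* outer == -1: lock was free; IRQs now disabled */
    /* ... NMI fires on this CPU ... */
    prb_lock(&printk_cpulock, &inner);   /* inner == this CPU: nested acquire, no spinning */
    prb_unlock(&printk_cpulock, inner);  /* owner remains this CPU; IRQs stay disabled */
    /* ... NMI returns ... */
    prb_unlock(&printk_cpulock, outer);  /* owner back to -1; saved IRQ flags restored */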