printk_safe: remove printk safe code
vprintk variants are now NMI-safe so there is no longer a need for the "safe" calls. NOTE: This also removes printk flushing functionality. Signed-off-by: John Ogness <john.ogness@linutronix.de> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
This commit is contained in:
parent
5c57b182f2
commit
acc072bdd0
|
@ -171,7 +171,6 @@ extern void panic_flush_kmsg_start(void)
|
|||
|
||||
extern void panic_flush_kmsg_end(void)
|
||||
{
|
||||
printk_safe_flush_on_panic();
|
||||
kmsg_dump(KMSG_DUMP_PANIC);
|
||||
bust_spinlocks(0);
|
||||
debug_locks_off();
|
||||
|
|
|
@ -181,11 +181,6 @@ static void watchdog_smp_panic(int cpu, u64 tb)
|
|||
|
||||
wd_smp_unlock(&flags);
|
||||
|
||||
printk_safe_flush();
|
||||
/*
|
||||
* printk_safe_flush() seems to require another print
|
||||
* before anything actually goes out to console.
|
||||
*/
|
||||
if (sysctl_hardlockup_all_cpu_backtrace)
|
||||
trigger_allbutself_cpu_backtrace();
|
||||
|
||||
|
|
|
@ -68,7 +68,6 @@ extern void irq_exit(void);
|
|||
#define nmi_enter() \
|
||||
do { \
|
||||
arch_nmi_enter(); \
|
||||
printk_nmi_enter(); \
|
||||
lockdep_off(); \
|
||||
ftrace_nmi_enter(); \
|
||||
BUG_ON(in_nmi()); \
|
||||
|
@ -85,7 +84,6 @@ extern void irq_exit(void);
|
|||
preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
|
||||
ftrace_nmi_exit(); \
|
||||
lockdep_on(); \
|
||||
printk_nmi_exit(); \
|
||||
arch_nmi_exit(); \
|
||||
} while (0)
|
||||
|
||||
|
|
|
@ -146,18 +146,6 @@ static inline __printf(1, 2) __cold
|
|||
void early_printk(const char *s, ...) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PRINTK_NMI
|
||||
extern void printk_nmi_enter(void);
|
||||
extern void printk_nmi_exit(void);
|
||||
extern void printk_nmi_direct_enter(void);
|
||||
extern void printk_nmi_direct_exit(void);
|
||||
#else
|
||||
static inline void printk_nmi_enter(void) { }
|
||||
static inline void printk_nmi_exit(void) { }
|
||||
static inline void printk_nmi_direct_enter(void) { }
|
||||
static inline void printk_nmi_direct_exit(void) { }
|
||||
#endif /* PRINTK_NMI */
|
||||
|
||||
#ifdef CONFIG_PRINTK
|
||||
asmlinkage __printf(5, 0)
|
||||
int vprintk_emit(int facility, int level,
|
||||
|
@ -202,8 +190,6 @@ __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
|
|||
void dump_stack_print_info(const char *log_lvl);
|
||||
void show_regs_print_info(const char *log_lvl);
|
||||
extern asmlinkage void dump_stack(void) __cold;
|
||||
extern void printk_safe_flush(void);
|
||||
extern void printk_safe_flush_on_panic(void);
|
||||
#else
|
||||
static inline __printf(1, 0)
|
||||
int vprintk(const char *s, va_list args)
|
||||
|
@ -267,14 +253,6 @@ static inline void show_regs_print_info(const char *log_lvl)
|
|||
static inline void dump_stack(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void printk_safe_flush(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void printk_safe_flush_on_panic(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
extern int kptr_restrict;
|
||||
|
|
|
@ -972,7 +972,6 @@ void crash_kexec(struct pt_regs *regs)
|
|||
old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
|
||||
if (old_cpu == PANIC_CPU_INVALID) {
|
||||
/* This is the 1st CPU which comes here, so go ahead. */
|
||||
printk_safe_flush_on_panic();
|
||||
__crash_kexec(regs);
|
||||
|
||||
/*
|
||||
|
|
|
@ -237,7 +237,6 @@ void panic(const char *fmt, ...)
|
|||
* Bypass the panic_cpu check and call __crash_kexec directly.
|
||||
*/
|
||||
if (!_crash_kexec_post_notifiers) {
|
||||
printk_safe_flush_on_panic();
|
||||
__crash_kexec(NULL);
|
||||
|
||||
/*
|
||||
|
@ -261,8 +260,6 @@ void panic(const char *fmt, ...)
|
|||
*/
|
||||
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
|
||||
|
||||
/* Call flush even twice. It tries harder with a single online CPU */
|
||||
printk_safe_flush_on_panic();
|
||||
kmsg_dump(KMSG_DUMP_PANIC);
|
||||
|
||||
/*
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
obj-y = printk.o
|
||||
obj-$(CONFIG_PRINTK) += printk_safe.o
|
||||
obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
|
||||
|
|
|
@ -20,35 +20,6 @@ int vprintk_store(int facility, int level,
|
|||
__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
|
||||
__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
|
||||
__printf(1, 0) int vprintk_func(const char *fmt, va_list args);
|
||||
void __printk_safe_enter(void);
|
||||
void __printk_safe_exit(void);
|
||||
|
||||
void printk_safe_init(void);
|
||||
bool printk_percpu_data_ready(void);
|
||||
|
||||
#define printk_safe_enter_irqsave(flags) \
|
||||
do { \
|
||||
local_irq_save(flags); \
|
||||
__printk_safe_enter(); \
|
||||
} while (0)
|
||||
|
||||
#define printk_safe_exit_irqrestore(flags) \
|
||||
do { \
|
||||
__printk_safe_exit(); \
|
||||
local_irq_restore(flags); \
|
||||
} while (0)
|
||||
|
||||
#define printk_safe_enter_irq() \
|
||||
do { \
|
||||
local_irq_disable(); \
|
||||
__printk_safe_enter(); \
|
||||
} while (0)
|
||||
|
||||
#define printk_safe_exit_irq() \
|
||||
do { \
|
||||
__printk_safe_exit(); \
|
||||
local_irq_enable(); \
|
||||
} while (0)
|
||||
|
||||
void defer_console_output(void);
|
||||
|
||||
|
@ -61,12 +32,10 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
|
|||
* semaphore and some of console functions (console_unlock()/etc.), so
|
||||
* printk-safe must preserve the existing local IRQ guarantees.
|
||||
*/
|
||||
#endif /* CONFIG_PRINTK */
|
||||
|
||||
#define printk_safe_enter_irqsave(flags) local_irq_save(flags)
|
||||
#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
|
||||
|
||||
#define printk_safe_enter_irq() local_irq_disable()
|
||||
#define printk_safe_exit_irq() local_irq_enable()
|
||||
|
||||
static inline void printk_safe_init(void) { }
|
||||
static inline bool printk_percpu_data_ready(void) { return false; }
|
||||
#endif /* CONFIG_PRINTK */
|
||||
|
|
|
@ -1764,13 +1764,6 @@ static bool cont_add(u32 caller_id, int facility, int level,
|
|||
}
|
||||
#endif /* 0 */
|
||||
|
||||
/*
 * Store a formatted message into the main ring buffer.
 * Thin wrapper: all work is delegated to vprintk_emit().
 */
int vprintk_store(int facility, int level,
		  const char *dict, size_t dictlen,
		  const char *fmt, va_list args)
{
	return vprintk_emit(facility, level, dict, dictlen, fmt, args);
}
|
||||
|
||||
/* ring buffer used as memory allocator for temporary sprint buffers */
|
||||
DECLARE_STATIC_PRINTKRB(sprint_rb,
|
||||
ilog2(PRINTK_RECORD_MAX + sizeof(struct prb_entry) +
|
||||
|
@ -1839,6 +1832,11 @@ asmlinkage int vprintk_emit(int facility, int level,
|
|||
}
|
||||
EXPORT_SYMBOL(vprintk_emit);
|
||||
|
||||
/*
 * Single printk entry point. Per the removal of the printk-safe
 * machinery, every context is expected to print directly here.
 */
__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
	return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
}
|
||||
|
||||
asmlinkage int vprintk(const char *fmt, va_list args)
|
||||
{
|
||||
return vprintk_func(fmt, args);
|
||||
|
@ -3255,5 +3253,4 @@ void kmsg_dump_rewind(struct kmsg_dumper *dumper)
|
|||
logbuf_unlock_irqrestore(flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1,414 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* printk_safe.c - Safe printk for printk-deadlock-prone contexts
|
||||
*/
|
||||
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/debug_locks.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/irq_work.h>
|
||||
#include <linux/printk.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
/*
|
||||
* printk() could not take logbuf_lock in NMI context. Instead,
|
||||
* it uses an alternative implementation that temporary stores
|
||||
* the strings into a per-CPU buffer. The content of the buffer
|
||||
* is later flushed into the main ring buffer via IRQ work.
|
||||
*
|
||||
* The alternative implementation is chosen transparently
|
||||
 * by examining the current printk() context mask stored in @printk_context
|
||||
* per-CPU variable.
|
||||
*
|
||||
* The implementation allows to flush the strings also from another CPU.
|
||||
* There are situations when we want to make sure that all buffers
|
||||
* were handled or when IRQs are blocked.
|
||||
*/
|
||||
|
||||
#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \
|
||||
sizeof(atomic_t) - \
|
||||
sizeof(atomic_t) - \
|
||||
sizeof(struct irq_work))
|
||||
|
||||
struct printk_safe_seq_buf {
|
||||
atomic_t len; /* length of written data */
|
||||
atomic_t message_lost;
|
||||
struct irq_work work; /* IRQ work that flushes the buffer */
|
||||
unsigned char buffer[SAFE_LOG_BUF_LEN];
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
|
||||
static DEFINE_PER_CPU(int, printk_context);
|
||||
|
||||
static DEFINE_RAW_SPINLOCK(safe_read_lock);
|
||||
|
||||
#ifdef CONFIG_PRINTK_NMI
|
||||
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
|
||||
#endif
|
||||
|
||||
/* Get flushed in a more safe context. */
|
||||
static void queue_flush_work(struct printk_safe_seq_buf *s)
|
||||
{
|
||||
if (printk_percpu_data_ready())
|
||||
irq_work_queue(&s->work);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a message to per-CPU context-dependent buffer. NMI and printk-safe
|
||||
* have dedicated buffers, because otherwise printk-safe preempted by
|
||||
* NMI-printk would have overwritten the NMI messages.
|
||||
*
|
||||
* The messages are flushed from irq work (or from panic()), possibly,
|
||||
* from other CPU, concurrently with printk_safe_log_store(). Should this
|
||||
* happen, printk_safe_log_store() will notice the buffer->len mismatch
|
||||
* and repeat the write.
|
||||
*/
|
||||
/*
 * Append a formatted message to the per-CPU buffer @s.
 *
 * Writers are per-context (NMI vs printk-safe have dedicated buffers),
 * but a flusher — IRQ work or panic path, possibly on another CPU — may
 * reset the buffer concurrently. The cmpxchg on @s->len detects that
 * race and the write is simply repeated.
 *
 * Returns the number of characters stored, 0 on overflow or empty output.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	size_t used;
	int written;
	va_list ap;

again:
	used = atomic_read(&s->len);

	/* One byte is always reserved for the trailing '\0'. */
	if (used >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * An empty buffer may have just been reset by a flusher; make
	 * sure all old data was read before it is overwritten. Appending
	 * to a non-empty buffer needs no such barrier.
	 */
	if (!used)
		smp_rmb();

	/* Copy the va_list: a retry after cmpxchg failure re-consumes it. */
	va_copy(ap, args);
	written = vscnprintf(s->buffer + used, sizeof(s->buffer) - used, fmt, ap);
	va_end(ap);
	if (!written)
		return 0;

	/*
	 * Redo the write if the buffer was flushed in the meantime.
	 * atomic_cmpxchg() is an implicit full barrier, so the data is
	 * visible before the updated length.
	 */
	if (atomic_cmpxchg(&s->len, used, used + written) != used)
		goto again;

	queue_flush_work(s);
	return written;
}
|
||||
|
||||
/* Push one line of flushed text into the main ring buffer. */
static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * printk_deferred() keeps console drivers out of the picture:
	 * this may run in NMI or printk-safe (panic) context, where the
	 * message must only reach the ring buffer. Consoles are invoked
	 * explicitly later, when no crashdump is generated.
	 */
	printk_deferred("%.*s", len, text);
}
|
||||
|
||||
/* printk part of the temporary buffer line by line */
|
||||
/*
 * Emit @len bytes of buffered text line by line.
 * Returns the number of bytes consumed (always @len).
 */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *cur, *limit;
	bool header;

	cur = start;
	limit = start + len;
	header = true;

	/* Walk the buffer, flushing one line at a time. */
	while (cur < limit) {
		if (*cur == '\n') {
			printk_safe_flush_line(start, cur - start + 1);
			start = ++cur;
			header = true;
			continue;
		}

		/* A log-level prefix marks a continuation or a missing newline. */
		if ((cur + 1 < limit) && printk_get_level(cur)) {
			if (header) {
				/* Still inside this line's header: skip it. */
				cur = printk_skip_level(cur);
				continue;
			}

			/* New record started without '\n': flush what we have. */
			printk_safe_flush_line(start, cur - start);
			start = cur++;
			header = true;
			continue;
		}

		header = false;
		cur++;
	}

	/* Flush a trailing partial line; a bare header is ignored. */
	if (start < limit && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, limit - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}
|
||||
|
||||
static void report_message_lost(struct printk_safe_seq_buf *s)
|
||||
{
|
||||
int lost = atomic_xchg(&s->message_lost, 0);
|
||||
|
||||
if (lost)
|
||||
printk_deferred("Lost %d message(s)!\n", lost);
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush data from the associated per-CPU buffer. The function
|
||||
* can be called either via IRQ work or independently.
|
||||
*/
|
||||
static void __printk_safe_flush(struct irq_work *work)
|
||||
{
|
||||
struct printk_safe_seq_buf *s =
|
||||
container_of(work, struct printk_safe_seq_buf, work);
|
||||
unsigned long flags;
|
||||
size_t len;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* The lock has two functions. First, one reader has to flush all
|
||||
* available message to make the lockless synchronization with
|
||||
* writers easier. Second, we do not want to mix messages from
|
||||
* different CPUs. This is especially important when printing
|
||||
* a backtrace.
|
||||
*/
|
||||
raw_spin_lock_irqsave(&safe_read_lock, flags);
|
||||
|
||||
i = 0;
|
||||
more:
|
||||
len = atomic_read(&s->len);
|
||||
|
||||
/*
|
||||
* This is just a paranoid check that nobody has manipulated
|
||||
* the buffer an unexpected way. If we printed something then
|
||||
* @len must only increase. Also it should never overflow the
|
||||
* buffer size.
|
||||
*/
|
||||
if ((i && i >= len) || len > sizeof(s->buffer)) {
|
||||
const char *msg = "printk_safe_flush: internal error\n";
|
||||
|
||||
printk_safe_flush_line(msg, strlen(msg));
|
||||
len = 0;
|
||||
}
|
||||
|
||||
if (!len)
|
||||
goto out; /* Someone else has already flushed the buffer. */
|
||||
|
||||
/* Make sure that data has been written up to the @len */
|
||||
smp_rmb();
|
||||
i += printk_safe_flush_buffer(s->buffer + i, len - i);
|
||||
|
||||
/*
|
||||
* Check that nothing has got added in the meantime and truncate
|
||||
* the buffer. Note that atomic_cmpxchg() is an implicit memory
|
||||
* barrier that makes sure that the data were copied before
|
||||
* updating s->len.
|
||||
*/
|
||||
if (atomic_cmpxchg(&s->len, len, 0) != len)
|
||||
goto more;
|
||||
|
||||
out:
|
||||
report_message_lost(s);
|
||||
raw_spin_unlock_irqrestore(&safe_read_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* printk_safe_flush - flush all per-cpu nmi buffers.
|
||||
*
|
||||
* The buffers are flushed automatically via IRQ work. This function
|
||||
* is useful only when someone wants to be sure that all buffers have
|
||||
* been flushed at some point.
|
||||
*/
|
||||
void printk_safe_flush(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
#ifdef CONFIG_PRINTK_NMI
|
||||
__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
|
||||
#endif
|
||||
__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* printk_safe_flush_on_panic - flush all per-cpu nmi buffers when the system
|
||||
* goes down.
|
||||
*
|
||||
* Similar to printk_safe_flush() but it can be called even in NMI context when
|
||||
* the system goes down. It does the best effort to get NMI messages into
|
||||
* the main ring buffer.
|
||||
*
|
||||
* Note that it could try harder when there is only one CPU online.
|
||||
*/
|
||||
void printk_safe_flush_on_panic(void)
|
||||
{
|
||||
/*
|
||||
* Make sure that we could access the main ring buffer.
|
||||
* Do not risk a double release when more CPUs are up.
|
||||
*/
|
||||
if (raw_spin_is_locked(&logbuf_lock)) {
|
||||
if (num_online_cpus() > 1)
|
||||
return;
|
||||
|
||||
debug_locks_off();
|
||||
raw_spin_lock_init(&logbuf_lock);
|
||||
}
|
||||
|
||||
if (raw_spin_is_locked(&safe_read_lock)) {
|
||||
if (num_online_cpus() > 1)
|
||||
return;
|
||||
|
||||
debug_locks_off();
|
||||
raw_spin_lock_init(&safe_read_lock);
|
||||
}
|
||||
|
||||
printk_safe_flush();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PRINTK_NMI
|
||||
/*
|
||||
* Safe printk() for NMI context. It uses a per-CPU buffer to
|
||||
* store the message. NMIs are not nested, so there is always only
|
||||
* one writer running. But the buffer might get flushed from another
|
||||
* CPU, so we need to be careful.
|
||||
*/
|
||||
/*
 * printk() for NMI context: stash the message in the dedicated per-CPU
 * NMI buffer. NMIs do not nest, so there is a single writer; a remote
 * CPU may still flush concurrently (handled in printk_safe_log_store()).
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return printk_safe_log_store(this_cpu_ptr(&nmi_print_seq), fmt, args);
}
|
||||
|
||||
/* Mark this CPU as printing from NMI context (notrace: called from nmi_enter). */
void notrace printk_nmi_enter(void)
{
	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}
|
||||
|
||||
/* Clear the NMI-printk marker for this CPU (notrace: called from nmi_exit). */
void notrace printk_nmi_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
}
|
||||
|
||||
/*
|
||||
* Marks a code that might produce many messages in NMI context
|
||||
* and the risk of losing them is more critical than eventual
|
||||
* reordering.
|
||||
*
|
||||
* It has effect only when called in NMI context. Then printk()
|
||||
* will try to store the messages into the main logbuf directly
|
||||
* and use the per-CPU buffers only as a fallback when the lock
|
||||
* is not available.
|
||||
*/
|
||||
void printk_nmi_direct_enter(void)
|
||||
{
|
||||
if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
|
||||
this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
|
||||
}
|
||||
|
||||
void printk_nmi_direct_exit(void)
|
||||
{
|
||||
this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
/* No CONFIG_PRINTK_NMI: there is no NMI buffer, so the message is dropped. */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}
|
||||
|
||||
#endif /* CONFIG_PRINTK_NMI */
|
||||
|
||||
/*
|
||||
* Lock-less printk(), to avoid deadlocks should the printk() recurse
|
||||
* into itself. It uses a per-CPU buffer to store the message, just like
|
||||
* NMI.
|
||||
*/
|
||||
/*
 * Lock-less printk() for recursion-prone (printk-safe) context: store the
 * message in the per-CPU safe buffer, as done for NMI, instead of
 * re-entering the regular printk() path.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	return printk_safe_log_store(this_cpu_ptr(&safe_print_seq), fmt, args);
}
|
||||
|
||||
/* Can be preempted by NMI. */
|
||||
void __printk_safe_enter(void)
|
||||
{
|
||||
this_cpu_inc(printk_context);
|
||||
}
|
||||
|
||||
/* Can be preempted by NMI. */
|
||||
void __printk_safe_exit(void)
|
||||
{
|
||||
this_cpu_dec(printk_context);
|
||||
}
|
||||
|
||||
/*
 * Context-aware printk dispatcher: pick the storage path that is safe
 * for the current per-CPU printk context.
 */
__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
	/*
	 * NMI "direct" mode: prefer the main logbuf even in NMI, but only
	 * via trylock, and never touch console drivers here — they may
	 * hold their own locks.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
	    raw_spin_trylock(&logbuf_lock)) {
		int stored;

		stored = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
		raw_spin_unlock(&logbuf_lock);
		defer_console_output();
		return stored;
	}

	/* In NMI (or when logbuf_lock was taken above): use the NMI buffer. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Safe mode: use the extra buffer to avoid a recursion deadlock. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}
|
||||
|
||||
/* Boot-time setup: wire the flush IRQ work into every per-CPU buffer. */
void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/* Drain anything logged before the IRQ works could be scheduled. */
	printk_safe_flush();
}
|
|
@ -8959,7 +8959,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
|
|||
tracing_off();
|
||||
|
||||
local_irq_save(flags);
|
||||
printk_nmi_direct_enter();
|
||||
|
||||
/* Simulate the iterator */
|
||||
trace_init_global_iter(&iter);
|
||||
|
@ -9036,7 +9035,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
|
|||
atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
|
||||
}
|
||||
atomic_dec(&dump_running);
|
||||
printk_nmi_direct_exit();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ftrace_dump);
|
||||
|
|
|
@ -75,12 +75,6 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
|
|||
touch_softlockup_watchdog();
|
||||
}
|
||||
|
||||
/*
|
||||
* Force flush any remote buffers that might be stuck in IRQ context
|
||||
* and therefore could not run their irq_work.
|
||||
*/
|
||||
printk_safe_flush();
|
||||
|
||||
clear_bit_unlock(0, &backtrace_flag);
|
||||
put_cpu();
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue