x86: Convert mce timer to hrtimer

mce_timer is started in atomic contexts during CPU bringup. This results
in might_sleep() warnings on RT. Convert mce_timer to an hrtimer to
avoid this.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
fold in:
|From: Mike Galbraith <bitbucket@online.de>
|Date: Wed, 29 May 2013 13:52:13 +0200
|Subject: [PATCH] x86/mce: fix mce timer interval
|
|Seems the mce timer fires at the wrong frequency in -rt kernels since
|roughly forever due to a 32-bit overflow.  3.8-rt is also missing a
|multiplier.
|
|Add the missing us -> ns conversion and 32-bit overflow prevention.
|
|Signed-off-by: Mike Galbraith <bitbucket@online.de>
|[bigeasy: use ULL instead of u64 cast]
|Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
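
For reference, the overflow the folded fix describes: jiffies_to_usecs() returns an
unsigned int, so multiplying it by a plain 1000 is done in 32 bits and wraps once the
default 5-minute poll interval is converted to nanoseconds; the ULL suffix promotes the
multiply to 64 bits. Below is a minimal userspace sketch of the arithmetic only; the HZ
value and the jiffies_to_usecs() stand-in are assumptions for illustration, not kernel code.

#include <stdio.h>
#include <stdint.h>

#define HZ 250				/* assumed config value, illustration only */

/* stand-in for the kernel's jiffies_to_usecs(): returns 32-bit microseconds */
static uint32_t jiffies_to_usecs(unsigned long j)
{
	return (uint32_t)(j * (1000000 / HZ));
}

int main(void)
{
	unsigned long iv = 5 * 60 * HZ;	/* default 5-minute interval, in jiffies */

	/* 32-bit multiply: 300,000,000 us * 1000 wraps around 2^32 */
	uint32_t wrapped = jiffies_to_usecs(iv) * 1000;
	/* 64-bit multiply, as in the patch: jiffies_to_usecs(iv) * 1000ULL */
	uint64_t correct = jiffies_to_usecs(iv) * 1000ULL;

	printf("32-bit: %u ns (~3.6 s, wrong)\n", (unsigned)wrapped);
	printf("64-bit: %llu ns (300 s)\n", (unsigned long long)correct);
	return 0;
}
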
Author:    Thomas Gleixner, 2010-12-13 16:33:39 +01:00
Committer: Alibek Omarov
Parent:    f9b2a5eee5
Commit:    f962e0c91a
1 changed file with 34 additions and 24 deletions


@@ -41,6 +41,7 @@
 #include <linux/debugfs.h>
 #include <linux/irq_work.h>
 #include <linux/export.h>
+#include <linux/jiffies.h>
 
 #include <asm/processor.h>
 #include <asm/mce.h>
@@ -1268,7 +1269,7 @@ void mce_log_therm_throt_event(__u64 status)
 static unsigned long check_interval = 5 * 60; /* 5 minutes */
 
 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
-static DEFINE_PER_CPU(struct timer_list, mce_timer);
+static DEFINE_PER_CPU(struct hrtimer, mce_timer);
 
 static unsigned long mce_adjust_timer_default(unsigned long interval)
 {
@@ -1278,13 +1279,10 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 static unsigned long (*mce_adjust_timer)(unsigned long interval) =
 	mce_adjust_timer_default;
 
-static void mce_timer_fn(unsigned long data)
+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
 	unsigned long iv;
 
-	WARN_ON(smp_processor_id() != data);
-
 	if (mce_available(__this_cpu_ptr(&cpu_info))) {
 		machine_check_poll(MCP_TIMESTAMP,
 				&__get_cpu_var(mce_poll_banks));
@@ -1305,9 +1303,11 @@ static void mce_timer_fn(unsigned long data)
 	__this_cpu_write(mce_next_interval, iv);
 	/* Might have become 0 after CMCI storm subsided */
 	if (iv) {
-		t->expires = jiffies + iv;
-		add_timer_on(t, smp_processor_id());
+		hrtimer_forward_now(timer, ns_to_ktime(
+					jiffies_to_usecs(iv) * 1000ULL));
+		return HRTIMER_RESTART;
 	}
+	return HRTIMER_NORESTART;
 }
 
 /*
@@ -1315,28 +1315,37 @@ static void mce_timer_fn(unsigned long data)
  */
 void mce_timer_kick(unsigned long interval)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
-	unsigned long when = jiffies + interval;
+	struct hrtimer *t = &__get_cpu_var(mce_timer);
 	unsigned long iv = __this_cpu_read(mce_next_interval);
 
-	if (timer_pending(t)) {
-		if (time_before(when, t->expires))
-			mod_timer_pinned(t, when);
+	if (hrtimer_active(t)) {
+		s64 exp;
+		s64 intv_us;
+
+		intv_us = jiffies_to_usecs(interval);
+		exp = ktime_to_us(hrtimer_expires_remaining(t));
+		if (intv_us < exp) {
+			hrtimer_cancel(t);
+			hrtimer_start_range_ns(t,
+					ns_to_ktime(intv_us * 1000),
+					0, HRTIMER_MODE_REL_PINNED);
+		}
 	} else {
-		t->expires = round_jiffies(when);
-		add_timer_on(t, smp_processor_id());
+		hrtimer_start_range_ns(t,
+			ns_to_ktime(jiffies_to_usecs(interval) * 1000ULL),
+			0, HRTIMER_MODE_REL_PINNED);
 	}
 	if (interval < iv)
 		__this_cpu_write(mce_next_interval, interval);
 }
 
-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
+/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */
 static void mce_timer_delete_all(void)
 {
 	int cpu;
 
 	for_each_online_cpu(cpu)
-		del_timer_sync(&per_cpu(mce_timer, cpu));
+		hrtimer_cancel(&per_cpu(mce_timer, cpu));
 }
 
 static void mce_do_trigger(struct work_struct *work)
@@ -1636,7 +1645,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 	}
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
 {
 	unsigned long iv = check_interval * HZ;
@@ -1645,16 +1654,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
 	per_cpu(mce_next_interval, cpu) = iv;
 
-	t->expires = round_jiffies(jiffies + iv);
-	add_timer_on(t, cpu);
+	hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
+			0, HRTIMER_MODE_REL_PINNED);
 }
 
 static void __mcheck_cpu_init_timer(void)
 {
-	struct timer_list *t = &__get_cpu_var(mce_timer);
+	struct hrtimer *t = &__get_cpu_var(mce_timer);
 	unsigned int cpu = smp_processor_id();
 
-	setup_timer(t, mce_timer_fn, cpu);
+	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	t->function = mce_timer_fn;
 	mce_start_timer(cpu, t);
 }
@@ -2331,6 +2341,8 @@ static void mce_disable_cpu(void *h)
 	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 
+	hrtimer_cancel(&__get_cpu_var(mce_timer));
+
 	if (!(action & CPU_TASKS_FROZEN))
 		cmci_clear();
 	for (i = 0; i < mca_cfg.banks; i++) {
@@ -2357,6 +2369,7 @@ static void mce_reenable_cpu(void *h)
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
 	}
+	__mcheck_cpu_init_timer();
 }
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
@@ -2364,7 +2377,6 @@ static int
 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
@@ -2380,11 +2392,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 	case CPU_DOWN_PREPARE:
 		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
-		del_timer_sync(t);
 		break;
 	case CPU_DOWN_FAILED:
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
-		mce_start_timer(cpu, t);
 		break;
 	}
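
Per-CPU pinning and the MCE-specific interval handling aside, the shape of the conversion
above is the plain hrtimer pattern: hrtimer_init() plus a callback that re-arms itself with
hrtimer_forward_now() and returns HRTIMER_RESTART, and hrtimer_cancel() on teardown. A
minimal module sketch of that pattern follows; the names (poll_timer, poll_timer_fn,
hrtimer_demo_*) and the 5 second period are illustrative assumptions, not taken from mce.c.

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer poll_timer;	/* illustrative stand-in for mce_timer */
static ktime_t poll_period;		/* arbitrary 5 s period for the demo */

static enum hrtimer_restart poll_timer_fn(struct hrtimer *t)
{
	pr_info("hrtimer demo: fired\n");

	/*
	 * Re-arm relative to now and keep the timer running, the same
	 * shape mce_timer_fn() takes after the conversion.
	 */
	hrtimer_forward_now(t, poll_period);
	return HRTIMER_RESTART;
}

static int __init hrtimer_demo_init(void)
{
	poll_period = ktime_set(5, 0);	/* 5 seconds */

	hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	poll_timer.function = poll_timer_fn;
	hrtimer_start(&poll_timer, poll_period, HRTIMER_MODE_REL);
	return 0;
}

static void __exit hrtimer_demo_exit(void)
{
	/* Like mce_timer_delete_all(): must not be called from hard IRQ context */
	hrtimer_cancel(&poll_timer);
}

module_init(hrtimer_demo_init);
module_exit(hrtimer_demo_exit);
MODULE_LICENSE("GPL");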