softlockup: remove timestamp checking from hung_task

Impact: saves sizeof(long) bytes per task_struct

By guaranteeing that at least sysctl_hung_task_timeout_secs seconds elapse between
tasklist scans, we can avoid keeping per-task timestamps.

Signed-off-by: Mandeep Singh Baines <msb@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Mandeep Singh Baines 2009-02-06 15:37:47 -08:00 committed by Ingo Molnar
parent 94be52dc07
commit 17406b82d6
3 changed files with 12 additions and 45 deletions
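The core claim of the commit message, that a full sysctl_hung_task_timeout_secs interval between tasklist scans makes per-task timestamps unnecessary, can be illustrated outside the kernel. Below is a minimal user-space sketch (not kernel code; struct fake_task and scan() are invented stand-ins for task_struct and check_hung_task()) of the invariant the new watchdog relies on: if a task's context-switch count is unchanged across two scans that are at least the timeout apart, the task has been blocked for at least that long.

/*
 * User-space illustration only -- not part of the patch.
 */
#include <stdio.h>
#include <unistd.h>

struct fake_task {
	unsigned long switch_count;	 /* stands in for t->nvcsw + t->nivcsw */
	unsigned long last_switch_count; /* the only per-task state kept after this patch */
};

/* One watchdog pass over a single "task". */
static void scan(struct fake_task *t, unsigned long timeout_secs)
{
	unsigned long switch_count = t->switch_count;

	if (switch_count != t->last_switch_count) {
		/* The task ran since the last scan: just remember the new count. */
		t->last_switch_count = switch_count;
		return;
	}
	/*
	 * No context switch since the previous scan.  Because scans are
	 * guaranteed to be at least timeout_secs apart, no timestamp is
	 * needed to conclude the task has been blocked for that long.
	 */
	printf("task blocked for more than %lu seconds\n", timeout_secs);
}

int main(void)
{
	/* A "task" that context-switched once and then never runs again. */
	struct fake_task t = { .switch_count = 1, .last_switch_count = 0 };
	unsigned long timeout = 2;	/* plays the role of sysctl_hung_task_timeout_secs */
	int i;

	for (i = 0; i < 3; i++) {
		sleep(timeout);		/* the watchdog sleeps the full timeout between scans */
		scan(&t, timeout);	/* first pass records the count, later passes report */
	}
	return 0;
}

In the patched watchdog loop (last hunk below), the same guarantee comes from schedule_timeout_interruptible(): it returns 0 only after a full timeout_jiffies(timeout) sleep, and an early wakeup (for example from the sysctl handler's wake_up_process()) simply re-reads the timeout and starts a fresh full-length sleep.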

include/linux/sched.h

@@ -1241,7 +1241,6 @@ struct task_struct {
 #endif
 #ifdef CONFIG_DETECT_HUNG_TASK
 /* hung task detection */
-	unsigned long last_switch_timestamp;
 	unsigned long last_switch_count;
 #endif
 /* CPU-specific state of this task */

kernel/fork.c

@@ -639,6 +639,9 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 	tsk->min_flt = tsk->maj_flt = 0;
 	tsk->nvcsw = tsk->nivcsw = 0;
+#ifdef CONFIG_DETECT_HUNG_TASK
+	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
+#endif

 	tsk->mm = NULL;
 	tsk->active_mm = NULL;
@@ -1041,11 +1044,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->default_timer_slack_ns = current->timer_slack_ns;

-#ifdef CONFIG_DETECT_HUNG_TASK
-	p->last_switch_count = 0;
-	p->last_switch_timestamp = 0;
-#endif
-
 	task_io_accounting_init(&p->ioac);
 	acct_clear_integrals(p);

kernel/hung_task.c

@@ -34,7 +34,6 @@ unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
  * Zero means infinite timeout - no checking done:
  */
 unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;
-static unsigned long __read_mostly hung_task_poll_jiffies;

 unsigned long __read_mostly sysctl_hung_task_warnings = 10;
@@ -69,33 +68,17 @@ static struct notifier_block panic_block = {
 	.notifier_call = hung_task_panic,
 };

-/*
- * Returns seconds, approximately.  We don't need nanosecond
- * resolution, and we don't need to waste time with a big divide when
- * 2^30ns == 1.074s.
- */
-static unsigned long get_timestamp(void)
-{
-	int this_cpu = raw_smp_processor_id();
-
-	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
-}
-
-static void check_hung_task(struct task_struct *t, unsigned long now,
-			    unsigned long timeout)
+static void check_hung_task(struct task_struct *t, unsigned long timeout)
 {
 	unsigned long switch_count = t->nvcsw + t->nivcsw;

 	if (t->flags & PF_FROZEN)
 		return;

-	if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
+	if (switch_count != t->last_switch_count) {
 		t->last_switch_count = switch_count;
-		t->last_switch_timestamp = now;
 		return;
 	}
-	if ((long)(now - t->last_switch_timestamp) < timeout)
-		return;

 	if (!sysctl_hung_task_warnings)
 		return;
 	sysctl_hung_task_warnings--;
@@ -111,7 +94,6 @@ static void check_hung_task(struct task_struct *t, unsigned long now,
 	sched_show_task(t);
 	__debug_show_held_locks(t);
-	t->last_switch_timestamp = now;
 	touch_nmi_watchdog();

 	if (sysctl_hung_task_panic)
@@ -145,7 +127,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 {
 	int max_count = sysctl_hung_task_check_count;
 	int batch_count = HUNG_TASK_BATCHING;
-	unsigned long now = get_timestamp();
 	struct task_struct *g, *t;

 	/*
@@ -168,19 +149,16 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 		}
 		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
 		if (t->state == TASK_UNINTERRUPTIBLE)
-			check_hung_task(t, now, timeout);
+			check_hung_task(t, timeout);
 	} while_each_thread(g, t);
  unlock:
 	rcu_read_unlock();
 }

-static void update_poll_jiffies(void)
+static unsigned long timeout_jiffies(unsigned long timeout)
 {
 	/* timeout of 0 will disable the watchdog */
-	if (sysctl_hung_task_timeout_secs == 0)
-		hung_task_poll_jiffies = MAX_SCHEDULE_TIMEOUT;
-	else
-		hung_task_poll_jiffies = sysctl_hung_task_timeout_secs * HZ / 2;
+	return timeout ? timeout * HZ : MAX_SCHEDULE_TIMEOUT;
 }

 /*
@@ -197,8 +175,6 @@ int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
 	if (ret || !write)
 		goto out;

-	update_poll_jiffies();
-
 	wake_up_process(watchdog_task);

  out:
@@ -211,20 +187,14 @@ int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
 static int watchdog(void *dummy)
 {
 	set_user_nice(current, 0);
-	update_poll_jiffies();

 	for ( ; ; ) {
-		unsigned long timeout;
+		unsigned long timeout = sysctl_hung_task_timeout_secs;

-		while (schedule_timeout_interruptible(hung_task_poll_jiffies));
+		while (schedule_timeout_interruptible(timeout_jiffies(timeout)))
+			timeout = sysctl_hung_task_timeout_secs;

-		/*
-		 * Need to cache timeout here to avoid timeout being set
-		 * to 0 via sysctl while inside check_hung_*_tasks().
-		 */
-		timeout = sysctl_hung_task_timeout_secs;
-		if (timeout)
-			check_hung_uninterruptible_tasks(timeout);
+		check_hung_uninterruptible_tasks(timeout);
 	}

 	return 0;