2019-05-19 14:08:55 +02:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2008-10-19 05:27:19 +02:00
|
|
|
/*
|
|
|
|
* kernel/freezer.c - Function to freeze a process
|
|
|
|
*
|
|
|
|
* Originally from kernel/power/process.c
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/suspend.h>
|
2011-05-23 20:51:41 +02:00
|
|
|
#include <linux/export.h>
|
2008-10-19 05:27:19 +02:00
|
|
|
#include <linux/syscalls.h>
|
|
|
|
#include <linux/freezer.h>
|
2011-11-21 21:32:23 +01:00
|
|
|
#include <linux/kthread.h>
|
2008-10-19 05:27:19 +02:00
|
|
|
|
2011-11-21 21:32:25 +01:00
|
|
|
/* total number of freezing conditions in effect */
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);

/*
 * indicate whether PM freezing is in effect, protected by
 * system_transition_mutex
 */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);
|
2008-10-19 05:27:19 +02:00
|
|
|
|
2011-11-21 21:32:25 +01:00
|
|
|
/**
|
|
|
|
* freezing_slow_path - slow path for testing whether a task needs to be frozen
|
|
|
|
* @p: task to be tested
|
|
|
|
*
|
|
|
|
* This function is called by freezing() if system_freezing_cnt isn't zero
|
|
|
|
* and tests whether @p needs to enter and stay in frozen state. Can be
|
|
|
|
* called under any context. The freezers are responsible for ensuring the
|
|
|
|
* target tasks see the updated state.
|
|
|
|
*/
|
|
|
|
bool freezing_slow_path(struct task_struct *p)
|
|
|
|
{
|
2013-07-25 02:41:33 +02:00
|
|
|
if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
|
2011-11-21 21:32:25 +01:00
|
|
|
return false;
|
|
|
|
|
2016-07-29 00:45:16 +02:00
|
|
|
if (test_tsk_thread_flag(p, TIF_MEMDIE))
|
2014-10-21 09:27:12 +02:00
|
|
|
return false;
|
|
|
|
|
2011-11-21 21:32:25 +01:00
|
|
|
if (pm_nosig_freezing || cgroup_freezing(p))
|
|
|
|
return true;
|
|
|
|
|
2011-11-23 18:28:17 +01:00
|
|
|
if (pm_freezing && !(p->flags & PF_KTHREAD))
|
2011-11-21 21:32:25 +01:00
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(freezing_slow_path);
|
|
|
|
|
2008-10-19 05:27:19 +02:00
|
|
|
/* Refrigerator is place where frozen processes are stored :-). */
/**
 * __refrigerator - loop here while the current task remains frozen
 * @check_kthr_stop: also leave the loop if kthread_should_stop() is true
 *
 * Returns %true if the task was actually frozen at least once.
 */
bool __refrigerator(bool check_kthr_stop)
{
	/* Hmm, should we be allowed to suspend when there are realtime
	   processes around? */
	bool was_frozen = false;
	/* Preserve the caller's task state so it can be restored on exit. */
	long save = current->state;

	pr_debug("%s entered refrigerator\n", current->comm);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * PF_FROZEN is set and cleared under freezer_lock so the
		 * decision to stay frozen is made atomically w.r.t. the
		 * freezing condition changing.
		 */
		spin_lock_irq(&freezer_lock);
		current->flags |= PF_FROZEN;
		if (!freezing(current) ||
		    (check_kthr_stop && kthread_should_stop()))
			current->flags &= ~PF_FROZEN;
		spin_unlock_irq(&freezer_lock);

		/* PF_FROZEN was cleared above: no freezing condition remains. */
		if (!(current->flags & PF_FROZEN))
			break;
		was_frozen = true;
		schedule();
	}

	pr_debug("%s left refrigerator\n", current->comm);

	/*
	 * Restore saved task state before returning. The mb'd version
	 * needs to be used; otherwise, it might silently break
	 * synchronization which depends on ordered task state change.
	 */
	set_current_state(save);

	return was_frozen;
}
EXPORT_SYMBOL(__refrigerator);
|
2008-10-19 05:27:19 +02:00
|
|
|
|
|
|
|
/*
 * Kick a userspace task out of an interruptible sleep as if a signal had
 * arrived, without actually queueing one, so it notices the freeze request.
 */
static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	/* Task may be exiting; nothing to do if its sighand is gone. */
	if (!lock_task_sighand(p, &flags))
		return;

	signal_wake_up(p, 0);
	unlock_task_sighand(p, &flags);
}
|
|
|
|
|
|
|
|
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task. It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;

	spin_lock_irqsave(&freezer_lock, flags);
	/* Nothing to do if no freezing condition applies or @p is already frozen. */
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	/*
	 * Userspace tasks are nudged via a fake signal so they hit the
	 * signal-delivery path; kernel threads are simply woken from an
	 * interruptible sleep.
	 */
	if (!(p->flags & PF_KTHREAD))
		fake_signal_wake_up(p);
	else
		wake_up_state(p, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
|
|
|
|
|
2011-11-21 21:32:23 +01:00
|
|
|
void __thaw_task(struct task_struct *p)
|
2008-10-19 05:27:21 +02:00
|
|
|
{
|
2011-11-21 21:32:24 +01:00
|
|
|
unsigned long flags;
|
2011-11-21 21:32:23 +01:00
|
|
|
|
2011-11-21 21:32:24 +01:00
|
|
|
spin_lock_irqsave(&freezer_lock, flags);
|
2011-11-23 18:28:17 +01:00
|
|
|
if (frozen(p))
|
2011-11-21 21:32:23 +01:00
|
|
|
wake_up_process(p);
|
2011-11-21 21:32:24 +01:00
|
|
|
spin_unlock_irqrestore(&freezer_lock, flags);
|
2008-10-19 05:27:21 +02:00
|
|
|
}
|
2011-11-21 21:32:25 +01:00
|
|
|
|
|
|
|
/**
|
2011-11-23 18:28:17 +01:00
|
|
|
* set_freezable - make %current freezable
|
2011-11-21 21:32:25 +01:00
|
|
|
*
|
|
|
|
* Mark %current freezable and enter refrigerator if necessary.
|
|
|
|
*/
|
2011-11-23 18:28:17 +01:00
|
|
|
bool set_freezable(void)
|
2011-11-21 21:32:25 +01:00
|
|
|
{
|
|
|
|
might_sleep();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Modify flags while holding freezer_lock. This ensures the
|
|
|
|
* freezer notices that we aren't frozen yet or the freezing
|
|
|
|
* condition is visible to try_to_freeze() below.
|
|
|
|
*/
|
|
|
|
spin_lock_irq(&freezer_lock);
|
|
|
|
current->flags &= ~PF_NOFREEZE;
|
|
|
|
spin_unlock_irq(&freezer_lock);
|
|
|
|
|
|
|
|
return try_to_freeze();
|
|
|
|
}
|
2011-11-23 18:28:17 +01:00
|
|
|
EXPORT_SYMBOL(set_freezable);
|