More power management updates for v4.12-rc1

  - Add Intel Gemini Lake CPU IDs to the intel_idle and intel_rapl
    drivers (David Box).
 
  - Add a NULL pointer check to the cpuidle core to prevent it from
    crashing on platforms with incomplete cpuidle configuration (Fei
    Li).
 
  - Fix DT-related documentation in the generic power domains (genpd)
    framework and add a MAINTAINERS entry for DT-related material in
    genpd (Viresh Kumar).
 
  - Update the system suspend/resume infrastructure to improve the
    handling of aborts of suspend transitions in progress in the
    wakeup framework and rework the suspend-to-idle core loop to make
    it possible to filter out spurious wakeup events (specifically the
    ones coming from ACPI) without resuming all the way up to user
    space every time (Rafael Wysocki).
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iQIcBAABCAAGBQJZEkVZAAoJEILEb/54YlRxypUP/RFSaiK7j/l9oBWfzzZYVqli
 V5WtbaG04/2thHT7kycaxQLxMknT6MrhlHsGw9hM3VtLHHJBoBCYNybl8rTJlWdp
 4eTe5wzYR+depKJcqo2DC4kiKDkghehG6QXG3dr40neV+hc+C54sssYddUXeK3Wm
 nkAGqMEc/xjkul+DzmTy0KQd60cp9vWKedPkj1knrUpqkHa7fNj7YH3nX/OIXn+X
 YMpj6hro5O2Zi3l2bz0M7x0aTqAynAOC+7vXTm+S4GDfTX/aPVpYonEt82N/NSGP
 0esiLQDdX/52XAMwuIxyE092R4zatGMYTPLkG4SvZnkpdcnhUw5uCCJEUtBJYaKQ
 5h9KaJ244XW1EPCrw/dzOnt4ysC7HXJBcqYSiysPlF+GB2vO8WxcOk0Duvzlyifp
 9TPNTo+8h/uX5dCPDUfd4Usv7+yvDxhLJ635XAOsWlhhg8rm3RFYCUaG+snE7/Q0
 GJVyNvywvxHu1CHC+bNeQ9OcMwzeiqn5CgBx2g2UjLlt9H8GS/8jsgQXnug1XPRC
 wBVCYCQm9+JoKvKoCulov/KGAKQEXkJEDNZ0Ulc0vI0x1CWfEI1E07yHLEE8k+Ej
 hcheMCFXlBgiIX6AeMK1asHNTcw4L/xo4gnrS9MS7DEOwZRqifI4CwLfU7iqcFdF
 JBEIg6CNkc90Cszj6Xtx
 =K1Q+
 -----END PGP SIGNATURE-----

Merge tag 'pm-extra-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 "These add new CPU IDs to a couple of drivers, fix a possible NULL
  pointer dereference in the cpuidle core, update DT-related things in
  the generic power domains framework and finally update the
  suspend/resume infrastructure to improve the handling of wakeups from
  suspend-to-idle.

  Specifics:

   - Add Intel Gemini Lake CPU IDs to the intel_idle and intel_rapl
     drivers (David Box).

   - Add a NULL pointer check to the cpuidle core to prevent it from
     crashing on platforms with incomplete cpuidle configuration (Fei
     Li).

   - Fix DT-related documentation in the generic power domains (genpd)
     framework and add a MAINTAINERS entry for DT-related material in
     genpd (Viresh Kumar).

   - Update the system suspend/resume infrastructure to improve the
     handling of aborts of suspend transitions in progress in the wakeup
     framework and rework the suspend-to-idle core loop to make it
     possible to filter out spurious wakeup events (specifically the
     ones coming from ACPI) without resuming all the way up to user
     space every time (Rafael Wysocki)"

* tag 'pm-extra-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / sleep: Ignore spurious SCI wakeups from suspend-to-idle
  PM / wakeup: Integrate mechanism to abort transitions in progress
  x86/intel_idle: add Gemini Lake support
  cpuidle: check dev before usage in cpuidle_use_deepest_state()
  powercap: intel_rapl: Add support for Gemini Lake
  PM / Domains: Add DT file to MAINTAINERS
  PM / Domains: Fix DT example
commit 2e4ab937ec
Author: Linus Torvalds
Date:   2017-05-10 09:12:30 -07:00

15 changed files with 123 additions and 46 deletions
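
As a usage note for the wakeup API rework in the diffs below: pm_wakeup_event() remains a plain ("soft") report, while the new pm_wakeup_hard_event() wrapper added in pm_wakeup.h expands to pm_wakeup_dev_event(dev, 0, true) and therefore also aborts a suspend transition in progress and wakes the system from suspend-to-idle. The following sketch shows a hypothetical caller; the driver and handler names are made up for illustration and are not part of this series:

/*
 * Hypothetical example only: an interrupt handler of a wakeup-capable
 * device reporting its event with the new "hard" semantics.
 */
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static irqreturn_t foo_wake_irq(int irq, void *data)
{
        struct device *dev = data;      /* hypothetical wakeup-capable device */

        /*
         * "Hard" report: pm_wakeup_dev_event(dev, 0, true) under the hood,
         * so wakeup_source_activate() calls pm_system_wakeup(), which bumps
         * pm_abort_suspend and wakes the suspend-to-idle loop.
         */
        pm_wakeup_hard_event(dev);

        /*
         * A plain pm_wakeup_event(dev, 0) is still counted by the wakeup
         * framework, but after this series it no longer forces a wakeup
         * from suspend-to-idle on its own.
         */
        return IRQ_HANDLED;
}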

@@ -81,7 +81,7 @@ Example 3:
child: power-controller@12341000 {
compatible = "foo,power-controller";
reg = <0x12341000 0x1000>;
-power-domains = <&parent 0>;
+power-domains = <&parent>;
#power-domain-cells = <0>;
domain-idle-states = <&DOMAIN_PWR_DN>;
};

@@ -5590,6 +5590,7 @@ L: linux-pm@vger.kernel.org
S: Supported
F: drivers/base/power/domain*.c
F: include/linux/pm_domain.h
+F: Documentation/devicetree/bindings/power/power_domain.txt
GENERIC UIO DRIVER FOR PCI DEVICES
M: "Michael S. Tsirkin" <mst@redhat.com>

@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
(test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
(battery->capacity_now <= battery->alarm)))
-pm_wakeup_event(&battery->device->dev, 0);
+pm_wakeup_hard_event(&battery->device->dev);
return result;
}

@@ -216,7 +216,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
}
if (state)
-pm_wakeup_event(&device->dev, 0);
+pm_wakeup_hard_event(&device->dev);
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
if (ret == NOTIFY_DONE)
@@ -398,7 +398,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
} else {
int keycode;
-pm_wakeup_event(&device->dev, 0);
+pm_wakeup_hard_event(&device->dev);
if (button->suspended)
break;
@@ -530,6 +530,7 @@ static int acpi_button_add(struct acpi_device *device)
lid_device = device;
}
+device_init_wakeup(&device->dev, true);
printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
return 0;

@@ -24,6 +24,7 @@
#include <linux/pm_qos.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include "internal.h"
@@ -399,7 +400,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
mutex_lock(&acpi_pm_notifier_lock);
if (adev->wakeup.flags.notifier_present) {
-__pm_wakeup_event(adev->wakeup.ws, 0);
+pm_wakeup_ws_event(adev->wakeup.ws, 0, true);
if (adev->wakeup.context.work.func)
queue_pm_work(&adev->wakeup.context.work);
}

@@ -662,14 +662,40 @@ static int acpi_freeze_prepare(void)
acpi_os_wait_events_complete();
if (acpi_sci_irq_valid())
enable_irq_wake(acpi_sci_irq);
return 0;
}
+static void acpi_freeze_wake(void)
+{
+/*
+* If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
+* that the SCI has triggered while suspended, so cancel the wakeup in
+* case it has not been a wakeup event (the GPEs will be checked later).
+*/
+if (acpi_sci_irq_valid() &&
+!irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+pm_system_cancel_wakeup();
+}
+static void acpi_freeze_sync(void)
+{
+/*
+* Process all pending events in case there are any wakeup ones.
+*
+* The EC driver uses the system workqueue, so that one needs to be
+* flushed too.
+*/
+acpi_os_wait_events_complete();
+flush_scheduled_work();
+}
static void acpi_freeze_restore(void)
{
acpi_disable_wakeup_devices(ACPI_STATE_S0);
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
acpi_enable_all_runtime_gpes();
}
@@ -681,6 +707,8 @@ static void acpi_freeze_end(void)
static const struct platform_freeze_ops acpi_freeze_ops = {
.begin = acpi_freeze_begin,
.prepare = acpi_freeze_prepare,
+.wake = acpi_freeze_wake,
+.sync = acpi_freeze_sync,
.restore = acpi_freeze_restore,
.end = acpi_freeze_end,
};

@@ -1091,11 +1091,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (async_error)
goto Complete;
-if (pm_wakeup_pending()) {
-async_error = -EBUSY;
-goto Complete;
-}
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;

@@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly;
/* First wakeup IRQ seen by the kernel in the last cycle. */
unsigned int pm_wakeup_irq __read_mostly;
-/* If set and the system is suspending, terminate the suspend. */
-static bool pm_abort_suspend __read_mostly;
+/* If greater than 0 and the system is suspending, terminate the suspend. */
+static atomic_t pm_abort_suspend __read_mostly;
/*
* Combined counters of registered wakeup events and wakeup events in progress.
@@ -512,12 +512,13 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
/**
* wakup_source_activate - Mark given wakeup source as active.
* @ws: Wakeup source to handle.
+* @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
*
* Update the @ws' statistics and, if @ws has just been activated, notify the PM
* core of the event by incrementing the counter of of wakeup events being
* processed.
*/
-static void wakeup_source_activate(struct wakeup_source *ws)
+static void wakeup_source_activate(struct wakeup_source *ws, bool hard)
{
unsigned int cec;
@@ -525,11 +526,8 @@ static void wakeup_source_activate(struct wakeup_source *ws)
"unregistered wakeup source\n"))
return;
-/*
-* active wakeup source should bring the system
-* out of PM_SUSPEND_FREEZE state
-*/
-freeze_wake();
+if (hard)
+pm_system_wakeup();
ws->active = true;
ws->active_count++;
@@ -546,8 +544,9 @@ static void wakeup_source_activate(struct wakeup_source *ws)
/**
* wakeup_source_report_event - Report wakeup event using the given source.
* @ws: Wakeup source to report the event for.
+* @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
*/
-static void wakeup_source_report_event(struct wakeup_source *ws)
+static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
{
ws->event_count++;
/* This is racy, but the counter is approximate anyway. */
@@ -555,7 +554,7 @@ static void wakeup_source_report_event(struct wakeup_source *ws)
ws->wakeup_count++;
if (!ws->active)
-wakeup_source_activate(ws);
+wakeup_source_activate(ws, hard);
}
/**
@@ -573,7 +572,7 @@ void __pm_stay_awake(struct wakeup_source *ws)
spin_lock_irqsave(&ws->lock, flags);
-wakeup_source_report_event(ws);
+wakeup_source_report_event(ws, false);
del_timer(&ws->timer);
ws->timer_expires = 0;
@@ -739,9 +738,10 @@ static void pm_wakeup_timer_fn(unsigned long data)
}
/**
-* __pm_wakeup_event - Notify the PM core of a wakeup event.
+* pm_wakeup_ws_event - Notify the PM core of a wakeup event.
* @ws: Wakeup source object associated with the event source.
* @msec: Anticipated event processing time (in milliseconds).
+* @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
*
* Notify the PM core of a wakeup event whose source is @ws that will take
* approximately @msec milliseconds to be processed by the kernel. If @ws is
@@ -750,7 +750,7 @@ static void pm_wakeup_timer_fn(unsigned long data)
*
* It is safe to call this function from interrupt context.
*/
-void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
+void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
{
unsigned long flags;
unsigned long expires;
@@ -760,7 +760,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
spin_lock_irqsave(&ws->lock, flags);
-wakeup_source_report_event(ws);
+wakeup_source_report_event(ws, hard);
if (!msec) {
wakeup_source_deactivate(ws);
@@ -779,17 +779,17 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
unlock:
spin_unlock_irqrestore(&ws->lock, flags);
}
-EXPORT_SYMBOL_GPL(__pm_wakeup_event);
+EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
/**
* pm_wakeup_event - Notify the PM core of a wakeup event.
* @dev: Device the wakeup event is related to.
* @msec: Anticipated event processing time (in milliseconds).
+* @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
*
-* Call __pm_wakeup_event() for the @dev's wakeup source object.
+* Call pm_wakeup_ws_event() for the @dev's wakeup source object.
*/
-void pm_wakeup_event(struct device *dev, unsigned int msec)
+void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
{
unsigned long flags;
@@ -797,10 +797,10 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
return;
spin_lock_irqsave(&dev->power.lock, flags);
-__pm_wakeup_event(dev->power.wakeup, msec);
+pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
spin_unlock_irqrestore(&dev->power.lock, flags);
}
-EXPORT_SYMBOL_GPL(pm_wakeup_event);
+EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
void pm_print_active_wakeup_sources(void)
{
@@ -856,20 +856,26 @@ bool pm_wakeup_pending(void)
pm_print_active_wakeup_sources();
}
-return ret || pm_abort_suspend;
+return ret || atomic_read(&pm_abort_suspend) > 0;
}
void pm_system_wakeup(void)
{
-pm_abort_suspend = true;
+atomic_inc(&pm_abort_suspend);
freeze_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);
-void pm_wakeup_clear(void)
+void pm_system_cancel_wakeup(void)
+{
+atomic_dec(&pm_abort_suspend);
+}
+void pm_wakeup_clear(bool reset)
{
-pm_abort_suspend = false;
pm_wakeup_irq = 0;
+if (reset)
+atomic_set(&pm_abort_suspend, 0);
}
void pm_system_irq_wakeup(unsigned int irq_number)

@@ -111,7 +111,8 @@ void cpuidle_use_deepest_state(bool enable)
preempt_disable();
dev = cpuidle_get_device();
-dev->use_deepest_state = enable;
+if (dev)
+dev->use_deepest_state = enable;
preempt_enable();
}

@@ -1097,6 +1097,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
+ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, idle_cpu_bxt),
ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
{}
};
@@ -1309,6 +1310,7 @@ static void intel_idle_state_table_update(void)
ivt_idle_state_table_update();
break;
case INTEL_FAM6_ATOM_GOLDMONT:
+case INTEL_FAM6_ATOM_GEMINI_LAKE:
bxt_idle_state_table_update();
break;
case INTEL_FAM6_SKYLAKE_DESKTOP:

@@ -1164,6 +1164,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng),
RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann),
RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
+RAPL_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),

@@ -106,8 +106,8 @@ extern void __pm_stay_awake(struct wakeup_source *ws);
extern void pm_stay_awake(struct device *dev);
extern void __pm_relax(struct wakeup_source *ws);
extern void pm_relax(struct device *dev);
-extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec);
-extern void pm_wakeup_event(struct device *dev, unsigned int msec);
+extern void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard);
+extern void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard);
#else /* !CONFIG_PM_SLEEP */
@@ -182,9 +182,11 @@ static inline void __pm_relax(struct wakeup_source *ws) {}
static inline void pm_relax(struct device *dev) {}
-static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) {}
+static inline void pm_wakeup_ws_event(struct wakeup_source *ws,
+unsigned int msec, bool hard) {}
-static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {}
+static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec,
+bool hard) {}
#endif /* !CONFIG_PM_SLEEP */
@@ -201,4 +203,19 @@ static inline void wakeup_source_trash(struct wakeup_source *ws)
wakeup_source_drop(ws);
}
+static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
+{
+return pm_wakeup_ws_event(ws, msec, false);
+}
+static inline void pm_wakeup_event(struct device *dev, unsigned int msec)
+{
+return pm_wakeup_dev_event(dev, msec, false);
+}
+static inline void pm_wakeup_hard_event(struct device *dev)
+{
+return pm_wakeup_dev_event(dev, 0, true);
+}
#endif /* _LINUX_PM_WAKEUP_H */

@@ -189,6 +189,8 @@ struct platform_suspend_ops {
struct platform_freeze_ops {
int (*begin)(void);
int (*prepare)(void);
+void (*wake)(void);
+void (*sync)(void);
void (*restore)(void);
void (*end)(void);
};
@@ -428,7 +430,8 @@ extern unsigned int pm_wakeup_irq;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
-extern void pm_wakeup_clear(void);
+extern void pm_system_cancel_wakeup(void);
+extern void pm_wakeup_clear(bool reset);
extern void pm_system_irq_wakeup(unsigned int irq_number);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
@@ -478,7 +481,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
-static inline void pm_wakeup_clear(void) {}
+static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
static inline void lock_system_sleep(void) {}

@@ -132,7 +132,7 @@ int freeze_processes(void)
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
-pm_wakeup_clear();
+pm_wakeup_clear(true);
pr_info("Freezing user space processes ... ");
pm_freezing = true;
error = try_to_freeze_tasks(true);

@@ -72,6 +72,8 @@ static void freeze_begin(void)
static void freeze_enter(void)
{
+trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
spin_lock_irq(&suspend_freeze_lock);
if (pm_wakeup_pending())
goto out;
@@ -98,6 +100,27 @@ static void freeze_enter(void)
out:
suspend_freeze_state = FREEZE_STATE_NONE;
spin_unlock_irq(&suspend_freeze_lock);
+trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
}
+static void s2idle_loop(void)
+{
+do {
+freeze_enter();
+if (freeze_ops && freeze_ops->wake)
+freeze_ops->wake();
+dpm_resume_noirq(PMSG_RESUME);
+if (freeze_ops && freeze_ops->sync)
+freeze_ops->sync();
+if (pm_wakeup_pending())
+break;
+pm_wakeup_clear(false);
+} while (!dpm_suspend_noirq(PMSG_SUSPEND));
+}
void freeze_wake(void)
@@ -371,10 +394,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
* all the devices are suspended.
*/
if (state == PM_SUSPEND_FREEZE) {
-trace_suspend_resume(TPS("machine_suspend"), state, true);
-freeze_enter();
-trace_suspend_resume(TPS("machine_suspend"), state, false);
-goto Platform_wake;
+s2idle_loop();
+goto Platform_early_resume;
}
error = disable_nonboot_cpus();