Merge branch 'acpi-pm' into pm-core
commit 69a10ca747

@@ -258,19 +258,3 @@ Description:

		This attribute has no effect on system-wide suspend/resume and
		hibernation.

What:		/sys/devices/.../power/pm_qos_remote_wakeup
Date:		September 2012
Contact:	Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
		The /sys/devices/.../power/pm_qos_remote_wakeup attribute
		is used for manipulating the PM QoS "remote wakeup required"
		flag.  If set, this flag indicates to the kernel that the
		device is a source of user events that have to be signaled from
		its low-power states.

		Not all drivers support this attribute.  If it isn't supported,
		it is not present.

		This attribute has no effect on system-wide suspend/resume and
		hibernation.
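
For illustration only (not part of the commit): the PM QoS flag attributes described above are ordinary sysfs files, so user space toggles them by writing "0" or "1". A minimal sketch in C, assuming a hypothetical device path and a driver that still exposes pm_qos_no_power_off (the attribute that remains after this change):

/*
 * Sketch: set a device PM QoS flag attribute from user space.
 * The device path is a made-up placeholder; the attribute exists only if
 * the driver called dev_pm_qos_expose_flags() for that device.
 */
#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/devices/pci0000:00/0000:00:15.0/power/pm_qos_no_power_off";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fputs("1\n", f);        /* "1" sets the flag, "0" clears it */
        fclose(f);
        return 0;
}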

Documentation/acpi/lpit.txt (new file, 25 lines)
@@ -0,0 +1,25 @@
To enumerate platform Low Power Idle states, Intel platforms use the
"Low Power Idle Table" (LPIT). More details about this table can be
downloaded from:
http://www.uefi.org/sites/default/files/resources/Intel_ACPI_Low_Power_S0_Idle.pdf

Residencies for each low power state can be read via FFH
(Function fixed hardware) or a memory mapped interface.

On platforms supporting S0ix sleep states, there can be two types of
residencies:
- CPU PKG C10 (read via the FFH interface)
- Platform Controller Hub (PCH) SLP_S0 (read via the memory mapped interface)

The following attributes are added dynamically to the cpuidle
sysfs attribute group:
	/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
	/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us

The "low_power_idle_cpu_residency_us" attribute shows time spent
by the CPU package in PKG C10.

The "low_power_idle_system_residency_us" attribute shows SLP_S0
residency, or system time spent with the SLP_S0# signal asserted.
This is the lowest possible system power state, achieved only when the CPU is in
PKG C10 and all functional blocks in the PCH are in a low power state.
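
As a reading aid (not part of the patch): both attributes report cumulative residency in microseconds, so a monitoring tool samples them twice and takes the difference. A minimal user-space sketch in C, assuming CONFIG_ACPI_LPIT is enabled and the platform's LPIT actually exposes the SLP_S0 counter:

/* Sketch: SLP_S0 residency gained over a 10 second interval. */
#include <stdio.h>
#include <unistd.h>

static unsigned long long read_us(const char *path)
{
        unsigned long long val = 0;
        FILE *f = fopen(path, "r");

        if (f) {
                if (fscanf(f, "%llu", &val) != 1)
                        val = 0;        /* unreadable: treat as zero */
                fclose(f);
        }
        return val;
}

int main(void)
{
        const char *attr =
                "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us";
        unsigned long long before = read_us(attr);

        sleep(10);
        printf("SLP_S0 residency gained: %llu us\n", read_us(attr) - before);
        return 0;
}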

@@ -98,8 +98,7 @@ Values are updated in response to changes of the request list.
The target values of resume latency and active state latency tolerance are
simply the minimum of the request values held in the parameter list elements.
The PM QoS flags aggregate value is a gather (bitwise OR) of all list elements'
values. Two device PM QoS flags are defined currently: PM_QOS_FLAG_NO_POWER_OFF
and PM_QOS_FLAG_REMOTE_WAKEUP.
values. One device PM QoS flag is defined currently: PM_QOS_FLAG_NO_POWER_OFF.

Note: The aggregated target values are implemented in such a way that reading
the aggregated value does not require any locking mechanism.
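
Not from the patch, just a sketch of the consumer side: kernel code queries the OR-aggregated flags with dev_pm_qos_flags(), which reports whether all, some, or none of the requests in the list set a given flag. With PM_QOS_FLAG_REMOTE_WAKEUP gone, the only check left looks like this:

/*
 * Sketch: test the aggregated PM_QOS_FLAG_NO_POWER_OFF value for a device.
 * PM_QOS_FLAGS_SOME/ALL (both > PM_QOS_FLAGS_NONE) mean at least one
 * request in the device's flags list set the flag.
 */
#include <linux/device.h>
#include <linux/pm_qos.h>

static bool device_must_stay_powered(struct device *dev)
{
        return dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) >
                        PM_QOS_FLAGS_NONE;
}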

@@ -153,14 +152,14 @@ PM QoS list of resume latency constraints and remove sysfs attribute
pm_qos_resume_latency_us from the device's power directory.

int dev_pm_qos_expose_flags(device, value)
Add a request to the device's PM QoS list of flags and create sysfs attributes
pm_qos_no_power_off and pm_qos_remote_wakeup under the device's power directory
allowing user space to change these flags' value.
Add a request to the device's PM QoS list of flags and create sysfs attribute
pm_qos_no_power_off under the device's power directory allowing user space to
change the value of the PM_QOS_FLAG_NO_POWER_OFF flag.

void dev_pm_qos_hide_flags(device)
Drop the request added by dev_pm_qos_expose_flags() from the device's PM QoS list
of flags and remove sysfs attributes pm_qos_no_power_off and pm_qos_remote_wakeup
under the device's power directory.
of flags and remove sysfs attribute pm_qos_no_power_off from the device's power
directory.

Notification mechanisms:
The per-device PM QoS framework has a per-device notification tree.
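
A minimal driver-side sketch (not taken from the patch) of how these two helpers are typically paired; after this commit only the no-power-off attribute is created. The probe/remove function names are invented for illustration, only the dev_pm_qos_*() calls are real:

/* Sketch: expose the flag while the device is bound, hide it on unbind. */
#include <linux/device.h>
#include <linux/pm_qos.h>

static int example_probe(struct device *dev)
{
        /* Creates power/pm_qos_no_power_off for this device. */
        return dev_pm_qos_expose_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
}

static void example_remove(struct device *dev)
{
        /* Drops the request and removes the sysfs attribute again. */
        dev_pm_qos_hide_flags(dev);
}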

@@ -80,6 +80,11 @@ endif
config ACPI_SPCR_TABLE
	bool

config ACPI_LPIT
	bool
	depends on X86_64
	default y

config ACPI_SLEEP
	bool
	depends on SUSPEND || HIBERNATION

@@ -56,6 +56,7 @@ acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
acpi-y += acpi_lpat.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o

drivers/acpi/acpi_lpit.c (new file, 162 lines)

@@ -0,0 +1,162 @@
/*
 * acpi_lpit.c - LPIT table processing functions
 *
 * Copyright (C) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/cpu.h>
#include <linux/acpi.h>
#include <asm/msr.h>
#include <asm/tsc.h>

struct lpit_residency_info {
	struct acpi_generic_address gaddr;
	u64 frequency;
	void __iomem *iomem_addr;
};

/* Storage for memory mapped and FFH based entries */
static struct lpit_residency_info residency_info_mem;
static struct lpit_residency_info residency_info_ffh;

static int lpit_read_residency_counter_us(u64 *counter, bool io_mem)
{
	int err;

	if (io_mem) {
		u64 count = 0;
		int error;

		error = acpi_os_read_iomem(residency_info_mem.iomem_addr, &count,
					   residency_info_mem.gaddr.bit_width);
		if (error)
			return error;

		/* Convert raw counter ticks to microseconds. */
		*counter = div64_u64(count * 1000000ULL, residency_info_mem.frequency);
		return 0;
	}

	/* The FFH counter is an MSR; extract the field described by the GAS. */
	err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
	if (!err) {
		u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
				       residency_info_ffh.gaddr.bit_width - 1,
				       residency_info_ffh.gaddr.bit_offset);

		*counter &= mask;
		*counter >>= residency_info_ffh.gaddr.bit_offset;
		*counter = div64_u64(*counter * 1000000ULL, residency_info_ffh.frequency);
		return 0;
	}

	return -ENODATA;
}

static ssize_t low_power_idle_system_residency_us_show(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	u64 counter;
	int ret;

	ret = lpit_read_residency_counter_us(&counter, true);
	if (ret)
		return ret;

	return sprintf(buf, "%llu\n", counter);
}
static DEVICE_ATTR_RO(low_power_idle_system_residency_us);

static ssize_t low_power_idle_cpu_residency_us_show(struct device *dev,
						    struct device_attribute *attr,
						    char *buf)
{
	u64 counter;
	int ret;

	ret = lpit_read_residency_counter_us(&counter, false);
	if (ret)
		return ret;

	return sprintf(buf, "%llu\n", counter);
}
static DEVICE_ATTR_RO(low_power_idle_cpu_residency_us);

int lpit_read_residency_count_address(u64 *address)
{
	if (!residency_info_mem.gaddr.address)
		return -EINVAL;

	*address = residency_info_mem.gaddr.address;

	return 0;
}

static void lpit_update_residency(struct lpit_residency_info *info,
				  struct acpi_lpit_native *lpit_native)
{
	info->frequency = lpit_native->counter_frequency ?
				lpit_native->counter_frequency : tsc_khz * 1000;
	if (!info->frequency)
		info->frequency = 1;

	info->gaddr = lpit_native->residency_counter;
	if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		info->iomem_addr = ioremap_nocache(info->gaddr.address,
						   info->gaddr.bit_width / 8);
		if (!info->iomem_addr)
			return;

		/* Silently fail, if cpuidle attribute group is not present */
		sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
					&dev_attr_low_power_idle_system_residency_us.attr,
					"cpuidle");
	} else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
		/* Silently fail, if cpuidle attribute group is not present */
		sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
					&dev_attr_low_power_idle_cpu_residency_us.attr,
					"cpuidle");
	}
}

static void lpit_process(u64 begin, u64 end)
{
	while (begin + sizeof(struct acpi_lpit_native) < end) {
		struct acpi_lpit_native *lpit_native = (struct acpi_lpit_native *)begin;

		if (!lpit_native->header.type && !lpit_native->header.flags) {
			if (lpit_native->residency_counter.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY &&
			    !residency_info_mem.gaddr.address) {
				lpit_update_residency(&residency_info_mem, lpit_native);
			} else if (lpit_native->residency_counter.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
				   !residency_info_ffh.gaddr.address) {
				lpit_update_residency(&residency_info_ffh, lpit_native);
			}
		}
		begin += lpit_native->header.length;
	}
}

void acpi_init_lpit(void)
{
	acpi_status status;
	u64 lpit_begin;
	struct acpi_table_lpit *lpit;

	status = acpi_get_table(ACPI_SIG_LPIT, 0, (struct acpi_table_header **)&lpit);

	if (ACPI_FAILURE(status))
		return;

	lpit_begin = (u64)lpit + sizeof(*lpit);
	lpit_process(lpit_begin, lpit_begin + lpit->header.length);
}

@@ -713,43 +713,9 @@ static int acpi_lpss_activate(struct device *dev)

static void acpi_lpss_dismiss(struct device *dev)
{
	acpi_dev_runtime_suspend(dev);
	acpi_dev_suspend(dev, false);
}

#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_suspend_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_suspend_late(dev);
	if (ret)
		return ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	return acpi_dev_suspend_late(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return pm_generic_resume_early(dev);
}
#endif /* CONFIG_PM_SLEEP */

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP 0xA0
#define LPSS_IOSF_UNIT_LPIO1 0xAB

@@ -835,19 +801,15 @@ static void lpss_iosf_exit_d3_state(void)
	mutex_unlock(&lpss_iosf_mutex);
}

static int acpi_lpss_runtime_suspend(struct device *dev)
static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_runtime_suspend(dev);
	if (ret)
		return ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	ret = acpi_dev_runtime_suspend(dev);
	ret = acpi_dev_suspend(dev, wakeup);

	/*
	 * This call must be last in the sequence, otherwise PMC will return

@@ -860,7 +822,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
	return ret;
}

static int acpi_lpss_runtime_resume(struct device *dev)
static int acpi_lpss_resume(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

@@ -881,7 +843,37 @@ static int acpi_lpss_runtime_resume(struct device *dev)
	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return pm_generic_runtime_resume(dev);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_suspend_late(struct device *dev)
{
	int ret = pm_generic_suspend_late(dev);

	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_resume_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_resume_early(dev);
}
#endif /* CONFIG_PM_SLEEP */

static int acpi_lpss_runtime_suspend(struct device *dev)
{
	int ret = pm_generic_runtime_suspend(dev);

	return ret ? ret : acpi_lpss_suspend(dev, true);
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */

@@ -581,8 +581,7 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
		d_min = ret;
		wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
			&& adev->wakeup.sleep_state >= target_state;
	} else if (dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) !=
			PM_QOS_FLAGS_NONE) {
	} else {
		wakeup = adev->wakeup.flags.valid;
	}

@@ -848,38 +847,39 @@ static int acpi_dev_pm_full_power(struct acpi_device *adev)
}

/**
 * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI.
 * acpi_dev_suspend - Put device into a low-power state using ACPI.
 * @dev: Device to put into a low-power state.
 * @wakeup: Whether or not to enable wakeup for the device.
 *
 * Put the given device into a runtime low-power state using the standard ACPI
 * Put the given device into a low-power state using the standard ACPI
 * mechanism. Set up remote wakeup if desired, choose the state to put the
 * device into (this checks if remote wakeup is expected to work too), and set
 * the power state of the device.
 */
int acpi_dev_runtime_suspend(struct device *dev)
int acpi_dev_suspend(struct device *dev, bool wakeup)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	bool remote_wakeup;
	u32 target_state = acpi_target_system_state();
	int error;

	if (!adev)
		return 0;

	remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
				PM_QOS_FLAGS_NONE;
	if (remote_wakeup) {
		error = acpi_device_wakeup_enable(adev, ACPI_STATE_S0);
	if (wakeup && acpi_device_can_wakeup(adev)) {
		error = acpi_device_wakeup_enable(adev, target_state);
		if (error)
			return -EAGAIN;
	} else {
		wakeup = false;
	}

	error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
	if (error && remote_wakeup)
	error = acpi_dev_pm_low_power(dev, adev, target_state);
	if (error && wakeup)
		acpi_device_wakeup_disable(adev);

	return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend);
EXPORT_SYMBOL_GPL(acpi_dev_suspend);

/**
 * acpi_dev_resume - Put device into the full-power state using ACPI.

@@ -912,7 +912,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume);
int acpi_subsys_runtime_suspend(struct device *dev)
{
	int ret = pm_generic_runtime_suspend(dev);
	return ret ? ret : acpi_dev_runtime_suspend(dev);
	return ret ? ret : acpi_dev_suspend(dev, true);
}
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);

@@ -931,41 +931,6 @@ int acpi_subsys_runtime_resume(struct device *dev)
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);

#ifdef CONFIG_PM_SLEEP
/**
 * acpi_dev_suspend_late - Put device into a low-power state using ACPI.
 * @dev: Device to put into a low-power state.
 *
 * Put the given device into a low-power state during system transition to a
 * sleep state using the standard ACPI mechanism. Set up system wakeup if
 * desired, choose the state to put the device into (this checks if system
 * wakeup is expected to work too), and set the power state of the device.
 */
int acpi_dev_suspend_late(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	u32 target_state;
	bool wakeup;
	int error;

	if (!adev)
		return 0;

	target_state = acpi_target_system_state();
	wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev);
	if (wakeup) {
		error = acpi_device_wakeup_enable(adev, target_state);
		if (error)
			return error;
	}

	error = acpi_dev_pm_low_power(dev, adev, target_state);
	if (error && wakeup)
		acpi_device_wakeup_disable(adev);

	return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_suspend_late);

static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
{
	u32 sys_target = acpi_target_system_state();

@@ -1048,7 +1013,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend);
int acpi_subsys_suspend_late(struct device *dev)
{
	int ret = pm_generic_suspend_late(dev);
	return ret ? ret : acpi_dev_suspend_late(dev);
	return ret ? ret : acpi_dev_suspend(dev, device_may_wakeup(dev));
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);

@@ -248,4 +248,10 @@ void acpi_watchdog_init(void);
static inline void acpi_watchdog_init(void) {}
#endif

#ifdef CONFIG_ACPI_LPIT
void acpi_init_lpit(void);
#else
static inline void acpi_init_lpit(void) { }
#endif

#endif /* _ACPI_INTERNAL_H_ */

@@ -663,26 +663,8 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)

EXPORT_SYMBOL(acpi_os_write_port);

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:

@@ -698,9 +680,37 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		BUG();
		return -EINVAL;
	}

	return 0;
}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	int error;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	error = acpi_os_read_iomem(virt_addr, value, width);
	BUG_ON(error);

	if (unmap)
		iounmap(virt_addr);
	else

@@ -2122,6 +2122,7 @@ int __init acpi_scan_init(void)
	acpi_int340x_thermal_init();
	acpi_amba_init();
	acpi_watchdog_init();
	acpi_init_lpit();

	acpi_scan_add_handler(&generic_device_handler);

@@ -346,9 +346,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
					| PM_QOS_FLAG_REMOTE_WAKEUP);
		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

@@ -309,33 +309,6 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
static DEVICE_ATTR(pm_qos_no_power_off, 0644,
		   pm_qos_no_power_off_show, pm_qos_no_power_off_store);

static ssize_t pm_qos_remote_wakeup_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
					& PM_QOS_FLAG_REMOTE_WAKEUP));
}

static ssize_t pm_qos_remote_wakeup_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t n)
{
	int ret;

	if (kstrtoint(buf, 0, &ret))
		return -EINVAL;

	if (ret != 0 && ret != 1)
		return -EINVAL;

	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret);
	return ret < 0 ? ret : n;
}

static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
		   pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store);

#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";

@@ -671,7 +644,6 @@ static const struct attribute_group pm_qos_latency_tolerance_attr_group = {

static struct attribute *pm_qos_flags_attrs[] = {
	&dev_attr_pm_qos_no_power_off.attr,
	&dev_attr_pm_qos_remote_wakeup.attr,
	NULL,
};
static const struct attribute_group pm_qos_flags_attr_group = {

@@ -287,6 +287,8 @@ acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width);
/*
 * Platform and hardware-independent physical memory interfaces
 */
int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width);

#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_memory
acpi_status
acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width);
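
A minimal caller sketch (not from the patch) of the new acpi_os_read_iomem() helper, modeled on how acpi_lpit.c above uses it: the caller maps the counter region itself and then reads through an existing virtual address. The physical address and width below are made-up placeholders:

/* Sketch: read a 32-bit memory-mapped counter through the new helper. */
#include <linux/acpi.h>
#include <linux/io.h>

static u64 example_read_counter(void)
{
        void __iomem *base = ioremap_nocache(0xfe000000, 4);   /* placeholder */
        u64 val = 0;

        if (!base)
                return 0;
        if (acpi_os_read_iomem(base, &val, 32))  /* non-zero return means failure */
                val = 0;
        iounmap(base);
        return val;
}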

@@ -864,7 +864,7 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr,
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
int acpi_dev_runtime_suspend(struct device *dev);
int acpi_dev_suspend(struct device *dev, bool wakeup);
int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);

@@ -889,7 +889,6 @@ int acpi_subsys_resume_early(struct device *dev);
int acpi_subsys_suspend(struct device *dev);
int acpi_subsys_freeze(struct device *dev);
#else
static inline int acpi_dev_suspend_late(struct device *dev) { return 0; }
static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline void acpi_subsys_complete(struct device *dev) {}

@@ -1248,4 +1247,13 @@ int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
}
#endif

#ifdef CONFIG_ACPI_LPIT
int lpit_read_residency_count_address(u64 *address);
#else
static inline int lpit_read_residency_count_address(u64 *address)
{
	return -EINVAL;
}
#endif

#endif /*_LINUX_ACPI_H*/
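
A hedged consumer sketch (not part of the commit): a platform driver that wants to read the SLP_S0 residency counter directly can ask the LPIT code for its physical address; the stub above keeps the call safe when CONFIG_ACPI_LPIT is off. The function name here is invented for illustration:

/* Sketch: obtain the memory-mapped SLP_S0 counter address from the LPIT. */
#include <linux/acpi.h>

static u64 example_get_slp_s0_counter_addr(void)
{
        u64 addr;

        if (lpit_read_residency_count_address(&addr))
                return 0;       /* no LPIT, or no memory-mapped counter */

        return addr;
}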

@@ -39,7 +39,6 @@ enum pm_qos_flags_status {
#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))

#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
#define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)

struct pm_qos_request {
	struct plist_node node;