Merge branches 'pm-pci', 'pm-sleep', 'pm-domains' and 'powercap'

* pm-pci:
  PCI: PM: Enable PME if it can be signaled from D3cold
  PCI: PM: Avoid forcing PCI_D0 for wakeup reasons inconsistently
  PCI: Use pci_update_current_state() in pci_enable_device_flags()

* pm-sleep:
  PM: sleep: unmark 'state' functions as kernel-doc
  PM: sleep: check RTC features instead of ops in suspend_test
  PM: sleep: s2idle: Replace deprecated CPU-hotplug functions

* pm-domains:
  PM: domains: Fix domain attach for CONFIG_PM_OPP=n
  arm64: dts: sc7180: Add required-opps for i2c
  PM: domains: Add support for 'required-opps' to set default perf state
  opp: Don't print an error if required-opps is missing

* powercap:
  powercap: Add Power Limit4 support for Alder Lake SoC
  powercap: intel_rapl: Replace deprecated CPU-hotplug functions
Rafael J. Wysocki 2021-08-30 19:25:42 +02:00
10 changed files with 105 additions and 53 deletions

View File

@@ -786,6 +786,8 @@
<&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -838,6 +840,8 @@
<&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -890,6 +894,8 @@
<&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -924,6 +930,8 @@
<&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -976,6 +984,8 @@
<&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1010,6 +1020,8 @@
<&aggre1_noc MASTER_QUP_0 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1075,6 +1087,8 @@
<&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1127,6 +1141,8 @@
<&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1161,6 +1177,8 @@
<&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1213,6 +1231,8 @@
<&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1247,6 +1267,8 @@
<&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};
@@ -1299,6 +1321,8 @@
<&aggre2_noc MASTER_QUP_1 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "qup-core", "qup-config",
"qup-memory";
power-domains = <&rpmhpd SC7180_CX>;
required-opps = <&rpmhpd_opp_low_svs>;
status = "disabled";
};

View File

@@ -2604,6 +2604,12 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
dev_dbg(dev, "removing from PM domain %s\n", pd->name);
/* Drop the default performance state */
if (dev_gpd_data(dev)->default_pstate) {
dev_pm_genpd_set_performance_state(dev, 0);
dev_gpd_data(dev)->default_pstate = 0;
}
for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
ret = genpd_remove_device(pd, dev);
if (ret != -EAGAIN)
@@ -2643,6 +2649,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
int pstate;
int ret;
ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
@@ -2681,10 +2688,29 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
genpd_unlock(pd);
}
if (ret)
if (ret) {
genpd_remove_device(pd, dev);
return -EPROBE_DEFER;
}
return ret ? -EPROBE_DEFER : 1;
/* Set the default performance state */
pstate = of_get_required_opp_performance_state(dev->of_node, index);
if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
ret = pstate;
goto err;
} else if (pstate > 0) {
ret = dev_pm_genpd_set_performance_state(dev, pstate);
if (ret)
goto err;
dev_gpd_data(dev)->default_pstate = pstate;
}
return 1;
err:
dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
pd->name, ret);
genpd_remove_device(pd, dev);
return ret;
}
/**
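
For reference, a minimal sketch of what the new attach logic boils down to, viewed from outside the genpd internals; the helper name example_apply_default_pstate() is an assumption for illustration, while of_get_required_opp_performance_state() and dev_pm_genpd_set_performance_state() are the interfaces used in the hunk above. It reads the performance state that the "required-opps" phandle (added in the sc7180 hunks earlier) points at and applies it as the device's default:

  #include <linux/device.h>
  #include <linux/errno.h>
  #include <linux/pm_domain.h>
  #include <linux/pm_opp.h>

  static int example_apply_default_pstate(struct device *dev)
  {
          int pstate;

          /* Positive performance state on success; -ENODEV if the node
           * has no "required-opps", -EOPNOTSUPP if CONFIG_PM_OPP=n. */
          pstate = of_get_required_opp_performance_state(dev->of_node, 0);
          if (pstate == -ENODEV || pstate == -EOPNOTSUPP)
                  return 0;       /* the property is optional */
          if (pstate < 0)
                  return pstate;

          return dev_pm_genpd_set_performance_state(dev, pstate);
  }

The detach hunk above undoes this by setting the performance state back to 0 when the device leaves the domain.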

View File

@@ -95,15 +95,7 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
static struct device_node *of_parse_required_opp(struct device_node *np,
int index)
{
struct device_node *required_np;
required_np = of_parse_phandle(np, "required-opps", index);
if (unlikely(!required_np)) {
pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
__func__, np, index);
}
return required_np;
return of_parse_phandle(np, "required-opps", index);
}
/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
@@ -1328,7 +1320,7 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
required_np = of_parse_required_opp(np, index);
if (!required_np)
return -EINVAL;
return -ENODEV;
opp_table = _find_table_of_opp_np(required_np);
if (IS_ERR(opp_table)) {

View File

@@ -1906,11 +1906,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
* so that things like MSI message writing will behave as expected
* (e.g. if the device really is in D0 at enable time).
*/
if (dev->pm_cap) {
u16 pmcsr;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
}
pci_update_current_state(dev, dev->current_state);
if (atomic_inc_return(&dev->enable_cnt) > 1)
return 0; /* already enabled */
@@ -2495,7 +2491,14 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable
if (enable) {
int error;
if (pci_pme_capable(dev, state))
/*
* Enable PME signaling if the device can signal PME from
* D3cold regardless of whether or not it can signal PME from
* the current target state, because that will allow it to
* signal PME when the hierarchy above it goes into D3cold and
* the device itself ends up in D3cold as a result of that.
*/
if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
pci_pme_active(dev, true);
else
ret = 1;
@@ -2599,16 +2602,20 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
if (dev->current_state == PCI_D3cold)
target_state = PCI_D3cold;
if (wakeup) {
if (wakeup && dev->pme_support) {
pci_power_t state = target_state;
/*
* Find the deepest state from which the device can generate
* PME#.
*/
if (dev->pme_support) {
while (target_state
&& !(dev->pme_support & (1 << target_state)))
target_state--;
}
while (state && !(dev->pme_support & (1 << state)))
state--;
if (state)
return state;
else if (dev->pme_support & 1)
return PCI_D0;
}
return target_state;
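
The wakeup-related hunks above keep PME armed when the hierarchy may drop to D3cold and make pci_target_state() pick the deepest PME-capable state, falling back to PCI_D0 when only D0 PME is supported. From a driver's point of view nothing changes; a hypothetical suspend fragment (example_suspend() and pdev are assumptions, not code from this commit) still just states whether wakeup is wanted:

  #include <linux/pci.h>
  #include <linux/pm_wakeup.h>

  static int example_suspend(struct pci_dev *pdev)
  {
          /* Arm wakeup if user space allows it; the core code above then
           * decides whether PME can be signaled from the chosen state or
           * from D3cold. */
          return pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
  }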

View File

@@ -158,16 +158,16 @@ static int get_energy_counter(struct powercap_zone *power_zone,
/* prevent CPU hotplug, make sure the RAPL domain does not go
* away while reading the counter.
*/
get_online_cpus();
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) {
*energy_raw = energy_now;
put_online_cpus();
cpus_read_unlock();
return 0;
}
put_online_cpus();
cpus_read_unlock();
return -EIO;
}
@@ -216,11 +216,11 @@ static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
if (rd->state & DOMAIN_STATE_BIOS_LOCKED)
return -EACCES;
get_online_cpus();
cpus_read_lock();
rapl_write_data_raw(rd, PL1_ENABLE, mode);
if (rapl_defaults->set_floor_freq)
rapl_defaults->set_floor_freq(rd, mode);
put_online_cpus();
cpus_read_unlock();
return 0;
}
@@ -234,13 +234,13 @@ static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
*mode = false;
return 0;
}
get_online_cpus();
cpus_read_lock();
if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) {
put_online_cpus();
cpus_read_unlock();
return -EIO;
}
*mode = val;
put_online_cpus();
cpus_read_unlock();
return 0;
}
@@ -317,7 +317,7 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid,
int ret = 0;
int id;
get_online_cpus();
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
if (id < 0) {
@@ -350,7 +350,7 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid,
if (!ret)
package_power_limit_irq_save(rp);
set_exit:
put_online_cpus();
cpus_read_unlock();
return ret;
}
@@ -363,7 +363,7 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
int ret = 0;
int id;
get_online_cpus();
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
if (id < 0) {
@@ -382,7 +382,7 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
prim = POWER_LIMIT4;
break;
default:
put_online_cpus();
cpus_read_unlock();
return -EINVAL;
}
if (rapl_read_data_raw(rd, prim, true, &val))
@@ -391,7 +391,7 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
*data = val;
get_exit:
put_online_cpus();
cpus_read_unlock();
return ret;
}
@@ -403,7 +403,7 @@ static int set_time_window(struct powercap_zone *power_zone, int cid,
int ret = 0;
int id;
get_online_cpus();
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
if (id < 0) {
@@ -423,7 +423,7 @@ static int set_time_window(struct powercap_zone *power_zone, int cid,
}
set_time_exit:
put_online_cpus();
cpus_read_unlock();
return ret;
}
@@ -435,7 +435,7 @@ static int get_time_window(struct powercap_zone *power_zone, int cid,
int ret = 0;
int id;
get_online_cpus();
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
if (id < 0) {
@@ -458,14 +458,14 @@ static int get_time_window(struct powercap_zone *power_zone, int cid,
val = 0;
break;
default:
put_online_cpus();
cpus_read_unlock();
return -EINVAL;
}
if (!ret)
*data = val;
get_time_exit:
put_online_cpus();
cpus_read_unlock();
return ret;
}
@@ -491,7 +491,7 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
int prim;
int ret = 0;
get_online_cpus();
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
switch (rd->rpl[id].prim_id) {
case PL1_ENABLE:
@@ -504,7 +504,7 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
prim = MAX_POWER;
break;
default:
put_online_cpus();
cpus_read_unlock();
return -EINVAL;
}
if (rapl_read_data_raw(rd, prim, true, &val))
@@ -516,7 +516,7 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
if (rd->rpl[id].prim_id == PL4_ENABLE)
*data = *data * 2;
put_online_cpus();
cpus_read_unlock();
return ret;
}
@@ -1358,7 +1358,7 @@ static void power_limit_state_save(void)
struct rapl_domain *rd;
int nr_pl, ret, i;
get_online_cpus();
cpus_read_lock();
list_for_each_entry(rp, &rapl_packages, plist) {
if (!rp->power_zone)
continue;
@@ -1390,7 +1390,7 @@ static void power_limit_state_save(void)
}
}
}
put_online_cpus();
cpus_read_unlock();
}
static void power_limit_state_restore(void)
@@ -1399,7 +1399,7 @@ static void power_limit_state_restore(void)
struct rapl_domain *rd;
int nr_pl, i;
get_online_cpus();
cpus_read_lock();
list_for_each_entry(rp, &rapl_packages, plist) {
if (!rp->power_zone)
continue;
@@ -1425,7 +1425,7 @@ static void power_limit_state_restore(void)
}
}
}
put_online_cpus();
cpus_read_unlock();
}
static int rapl_pm_callback(struct notifier_block *nb,
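
Every get_online_cpus()/put_online_cpus() pair in this file becomes cpus_read_lock()/cpus_read_unlock(); the deprecated helpers were wrappers around the same CPU-hotplug lock, so the conversion is a one-for-one rename. A minimal sketch of the pattern (the function and its read() callback are assumptions for illustration):

  #include <linux/cpu.h>
  #include <linux/types.h>

  static u64 example_read_counter(u64 (*read)(void))
  {
          u64 val;

          cpus_read_lock();       /* was: get_online_cpus() */
          val = read();           /* CPU-hotplug-sensitive work */
          cpus_read_unlock();     /* was: put_online_cpus() */

          return val;
  }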

View File

@@ -138,6 +138,8 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
/* List of verified CPUs. */
static const struct x86_cpu_id pl4_support_ids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE, X86_FEATURE_ANY },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_L, X86_FEATURE_ANY },
{}
};
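
The new Alder Lake entries extend the PL4 allow-list. As a hypothetical illustration (not code from this commit) of how such a table is normally consumed, x86_match_cpu() returns the matching entry, or NULL when the running CPU is not listed:

  #include <linux/types.h>
  #include <asm/cpu_device_id.h>

  static bool example_pl4_supported(void)
  {
          return x86_match_cpu(pl4_support_ids) != NULL;
  }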

View File

@@ -198,6 +198,7 @@ struct generic_pm_domain_data {
struct notifier_block *power_nb;
int cpu;
unsigned int performance_state;
unsigned int default_pstate;
unsigned int rpm_pstate;
ktime_t next_wakeup;
void *data;

View File

@@ -577,7 +577,7 @@ static inline void pm_print_times_init(void) {}
struct kobject *power_kobj;
/**
/*
* state - control system sleep states.
*
* show() returns available sleep state labels, which may be "mem", "standby",
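
scripts/kernel-doc treats comments opening with "/**" as documentation for the symbol that immediately follows; the 'state' comment above is free-form sysfs documentation rather than kernel-doc, so its opener is downgraded to a plain "/*" to avoid warnings. A small hypothetical example of the distinction (example_show() is made up):

  #include <linux/types.h>

  /* A plain comment: ignored by scripts/kernel-doc. */

  /**
   * example_show - a well-formed kernel-doc comment
   * @buf: output buffer
   *
   * Comments that open with two asterisks must document the symbol that
   * follows, in this format, or kernel-doc emits a warning.
   */
  static ssize_t example_show(char *buf)
  {
          return 0;
  }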

View File

@@ -96,7 +96,7 @@ static void s2idle_enter(void)
s2idle_state = S2IDLE_STATE_ENTER;
raw_spin_unlock_irq(&s2idle_lock);
get_online_cpus();
cpus_read_lock();
cpuidle_resume();
/* Push all the CPUs into the idle loop. */
@@ -106,7 +106,7 @@ static void s2idle_enter(void)
s2idle_state == S2IDLE_STATE_WAKE);
cpuidle_pause();
put_online_cpus();
cpus_read_unlock();
raw_spin_lock_irq(&s2idle_lock);

View File

@@ -129,7 +129,7 @@ static int __init has_wakealarm(struct device *dev, const void *data)
{
struct rtc_device *candidate = to_rtc_device(dev);
if (!candidate->ops->set_alarm)
if (!test_bit(RTC_FEATURE_ALARM, candidate->features))
return 0;
if (!device_may_wakeup(candidate->dev.parent))
return 0;
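
has_wakealarm() now keys off the RTC_FEATURE_ALARM bit rather than the presence of a .set_alarm callback. A hypothetical driver-side fragment (not from this commit) shows the other end of that contract: an RTC without working alarm support clears the bit, so tests like the one above skip it:

  #include <linux/bitops.h>
  #include <linux/rtc.h>

  static void example_disable_alarm(struct rtc_device *rtc)
  {
          /* No usable alarm: clear the feature bit even though a
           * .set_alarm callback may still be registered. */
          clear_bit(RTC_FEATURE_ALARM, rtc->features);
  }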