Merge branches 'pm-sleep', 'pm-domains', 'powercap' and 'pm-tools'
* pm-sleep:
  PM: sleep: spread "const char *" correctness
  PM: hibernate: fix white space in a few places
  freezer: Add unsafe version of freezable_schedule_timeout_interruptible() for NFS
  PM: sleep: core: Emit changed uevent on wakeup_sysfs_add/remove

* pm-domains:
  PM: domains: Restore comment indentation for generic_pm_domain.child_links
  PM: domains: Fix up terminology with parent/child

* powercap:
  powercap: Add Power Limit4 support
  powercap: idle_inject: Replace play_idle() with play_idle_precise() in comments
  powercap: intel_rapl: add support for Sapphire Rapids

* pm-tools:
  pm-graph v5.7 - important s2idle fixes
  cpupower: Replace HTTP links with HTTPS ones
  cpupower: Fix NULL but dereferenced coccicheck errors
  cpupower: Fix comparing pointer to 0 coccicheck warns
commit 86ba54fb08
@@ -167,11 +167,13 @@ For example::
 	package-0
 	---------
 
-The Intel RAPL technology allows two constraints, short term and long term,
-with two different time windows to be applied to each power zone. Thus for
-each zone there are 2 attributes representing the constraint names, 2 power
-limits and 2 attributes representing the sizes of the time windows. Such that,
-constraint_j_* attributes correspond to the jth constraint (j = 0,1).
+Depending on different power zones, the Intel RAPL technology allows
+one or multiple constraints like short term, long term and peak power,
+with different time windows to be applied to each power zone.
+All the zones contain attributes representing the constraint names,
+power limits and the sizes of the time windows. Note that time window
+is not applicable to peak power. Here, constraint_j_* attributes
+correspond to the jth constraint (j = 0,1,2).
 
 For example::
 
@@ -181,6 +183,9 @@ For example::
 	constraint_1_name
 	constraint_1_power_limit_uw
 	constraint_1_time_window_us
+	constraint_2_name
+	constraint_2_power_limit_uw
+	constraint_2_time_window_us
 
 Power Zone Attributes
 =====================
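As a quick illustration of the constraint attributes described above (an editor's sketch, not part of this commit; the zone path and index are assumptions and differ between machines), a small userspace reader of the new third constraint could look like this:

#include <stdio.h>

/* Hypothetical powercap zone path; real systems may expose several zones. */
#define ZONE "/sys/class/powercap/intel-rapl:0/"

int main(void)
{
	FILE *f;
	char name[64] = "";
	unsigned long long limit_uw = 0;

	/* constraint_2_* only exists where PL4 (peak power) is supported. */
	f = fopen(ZONE "constraint_2_name", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%63s", name) != 1)
		return 1;
	fclose(f);

	f = fopen(ZONE "constraint_2_power_limit_uw", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%llu", &limit_uw) != 1)
		return 1;
	fclose(f);

	/* No time window read here: the text above notes that the time window
	 * attribute is not applicable to the peak power constraint. */
	printf("%s: power limit %llu uW\n", name, limit_uw);
	return 0;
}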
@ -263,18 +263,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
|
||||
/*
|
||||
* Traverse all sub-domains within the domain. This can be
|
||||
* done without any additional locking as the link->performance_state
|
||||
* field is protected by the master genpd->lock, which is already taken.
|
||||
* field is protected by the parent genpd->lock, which is already taken.
|
||||
*
|
||||
* Also note that link->performance_state (subdomain's performance state
|
||||
* requirement to master domain) is different from
|
||||
* link->slave->performance_state (current performance state requirement
|
||||
* requirement to parent domain) is different from
|
||||
* link->child->performance_state (current performance state requirement
|
||||
* of the devices/sub-domains of the subdomain) and so can have a
|
||||
* different value.
|
||||
*
|
||||
* Note that we also take vote from powered-off sub-domains into account
|
||||
* as the same is done for devices right now.
|
||||
*/
|
||||
list_for_each_entry(link, &genpd->master_links, master_node) {
|
||||
list_for_each_entry(link, &genpd->parent_links, parent_node) {
|
||||
if (link->performance_state > state)
|
||||
state = link->performance_state;
|
||||
}
|
||||
@ -285,40 +285,40 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
|
||||
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
|
||||
unsigned int state, int depth)
|
||||
{
|
||||
struct generic_pm_domain *master;
|
||||
struct generic_pm_domain *parent;
|
||||
struct gpd_link *link;
|
||||
int master_state, ret;
|
||||
int parent_state, ret;
|
||||
|
||||
if (state == genpd->performance_state)
|
||||
return 0;
|
||||
|
||||
/* Propagate to masters of genpd */
|
||||
list_for_each_entry(link, &genpd->slave_links, slave_node) {
|
||||
master = link->master;
|
||||
/* Propagate to parents of genpd */
|
||||
list_for_each_entry(link, &genpd->child_links, child_node) {
|
||||
parent = link->parent;
|
||||
|
||||
if (!master->set_performance_state)
|
||||
if (!parent->set_performance_state)
|
||||
continue;
|
||||
|
||||
/* Find master's performance state */
|
||||
/* Find parent's performance state */
|
||||
ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
|
||||
master->opp_table,
|
||||
parent->opp_table,
|
||||
state);
|
||||
if (unlikely(ret < 0))
|
||||
goto err;
|
||||
|
||||
master_state = ret;
|
||||
parent_state = ret;
|
||||
|
||||
genpd_lock_nested(master, depth + 1);
|
||||
genpd_lock_nested(parent, depth + 1);
|
||||
|
||||
link->prev_performance_state = link->performance_state;
|
||||
link->performance_state = master_state;
|
||||
master_state = _genpd_reeval_performance_state(master,
|
||||
master_state);
|
||||
ret = _genpd_set_performance_state(master, master_state, depth + 1);
|
||||
link->performance_state = parent_state;
|
||||
parent_state = _genpd_reeval_performance_state(parent,
|
||||
parent_state);
|
||||
ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
|
||||
if (ret)
|
||||
link->performance_state = link->prev_performance_state;
|
||||
|
||||
genpd_unlock(master);
|
||||
genpd_unlock(parent);
|
||||
|
||||
if (ret)
|
||||
goto err;
|
||||
@ -333,26 +333,26 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
|
||||
|
||||
err:
|
||||
/* Encountered an error, lets rollback */
|
||||
list_for_each_entry_continue_reverse(link, &genpd->slave_links,
|
||||
slave_node) {
|
||||
master = link->master;
|
||||
list_for_each_entry_continue_reverse(link, &genpd->child_links,
|
||||
child_node) {
|
||||
parent = link->parent;
|
||||
|
||||
if (!master->set_performance_state)
|
||||
if (!parent->set_performance_state)
|
||||
continue;
|
||||
|
||||
genpd_lock_nested(master, depth + 1);
|
||||
genpd_lock_nested(parent, depth + 1);
|
||||
|
||||
master_state = link->prev_performance_state;
|
||||
link->performance_state = master_state;
|
||||
parent_state = link->prev_performance_state;
|
||||
link->performance_state = parent_state;
|
||||
|
||||
master_state = _genpd_reeval_performance_state(master,
|
||||
master_state);
|
||||
if (_genpd_set_performance_state(master, master_state, depth + 1)) {
|
||||
parent_state = _genpd_reeval_performance_state(parent,
|
||||
parent_state);
|
||||
if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
|
||||
pr_err("%s: Failed to roll back to %d performance state\n",
|
||||
master->name, master_state);
|
||||
parent->name, parent_state);
|
||||
}
|
||||
|
||||
genpd_unlock(master);
|
||||
genpd_unlock(parent);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -552,7 +552,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
|
||||
|
||||
/*
|
||||
* If sd_count > 0 at this point, one of the subdomains hasn't
|
||||
* managed to call genpd_power_on() for the master yet after
|
||||
* managed to call genpd_power_on() for the parent yet after
|
||||
* incrementing it. In that case genpd_power_on() will wait
|
||||
* for us to drop the lock, so we can call .power_off() and let
|
||||
* the genpd_power_on() restore power for us (this shouldn't
|
||||
@ -566,22 +566,22 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
|
||||
genpd->status = GPD_STATE_POWER_OFF;
|
||||
genpd_update_accounting(genpd);
|
||||
|
||||
list_for_each_entry(link, &genpd->slave_links, slave_node) {
|
||||
genpd_sd_counter_dec(link->master);
|
||||
genpd_lock_nested(link->master, depth + 1);
|
||||
genpd_power_off(link->master, false, depth + 1);
|
||||
genpd_unlock(link->master);
|
||||
list_for_each_entry(link, &genpd->child_links, child_node) {
|
||||
genpd_sd_counter_dec(link->parent);
|
||||
genpd_lock_nested(link->parent, depth + 1);
|
||||
genpd_power_off(link->parent, false, depth + 1);
|
||||
genpd_unlock(link->parent);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* genpd_power_on - Restore power to a given PM domain and its masters.
|
||||
* genpd_power_on - Restore power to a given PM domain and its parents.
|
||||
* @genpd: PM domain to power up.
|
||||
* @depth: nesting count for lockdep.
|
||||
*
|
||||
* Restore power to @genpd and all of its masters so that it is possible to
|
||||
* Restore power to @genpd and all of its parents so that it is possible to
|
||||
* resume a device belonging to it.
|
||||
*/
|
||||
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
|
||||
@ -594,20 +594,20 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
|
||||
|
||||
/*
|
||||
* The list is guaranteed not to change while the loop below is being
|
||||
* executed, unless one of the masters' .power_on() callbacks fiddles
|
||||
* executed, unless one of the parents' .power_on() callbacks fiddles
|
||||
* with it.
|
||||
*/
|
||||
list_for_each_entry(link, &genpd->slave_links, slave_node) {
|
||||
struct generic_pm_domain *master = link->master;
|
||||
list_for_each_entry(link, &genpd->child_links, child_node) {
|
||||
struct generic_pm_domain *parent = link->parent;
|
||||
|
||||
genpd_sd_counter_inc(master);
|
||||
genpd_sd_counter_inc(parent);
|
||||
|
||||
genpd_lock_nested(master, depth + 1);
|
||||
ret = genpd_power_on(master, depth + 1);
|
||||
genpd_unlock(master);
|
||||
genpd_lock_nested(parent, depth + 1);
|
||||
ret = genpd_power_on(parent, depth + 1);
|
||||
genpd_unlock(parent);
|
||||
|
||||
if (ret) {
|
||||
genpd_sd_counter_dec(master);
|
||||
genpd_sd_counter_dec(parent);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
@ -623,12 +623,12 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
|
||||
|
||||
err:
|
||||
list_for_each_entry_continue_reverse(link,
|
||||
&genpd->slave_links,
|
||||
slave_node) {
|
||||
genpd_sd_counter_dec(link->master);
|
||||
genpd_lock_nested(link->master, depth + 1);
|
||||
genpd_power_off(link->master, false, depth + 1);
|
||||
genpd_unlock(link->master);
|
||||
&genpd->child_links,
|
||||
child_node) {
|
||||
genpd_sd_counter_dec(link->parent);
|
||||
genpd_lock_nested(link->parent, depth + 1);
|
||||
genpd_power_off(link->parent, false, depth + 1);
|
||||
genpd_unlock(link->parent);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -932,13 +932,13 @@ late_initcall(genpd_power_off_unused);
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
|
||||
/**
|
||||
* genpd_sync_power_off - Synchronously power off a PM domain and its masters.
|
||||
* genpd_sync_power_off - Synchronously power off a PM domain and its parents.
|
||||
* @genpd: PM domain to power off, if possible.
|
||||
* @use_lock: use the lock.
|
||||
* @depth: nesting count for lockdep.
|
||||
*
|
||||
* Check if the given PM domain can be powered off (during system suspend or
|
||||
* hibernation) and do that if so. Also, in that case propagate to its masters.
|
||||
* hibernation) and do that if so. Also, in that case propagate to its parents.
|
||||
*
|
||||
* This function is only called in "noirq" and "syscore" stages of system power
|
||||
* transitions. The "noirq" callbacks may be executed asynchronously, thus in
|
||||
@ -963,21 +963,21 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
|
||||
|
||||
genpd->status = GPD_STATE_POWER_OFF;
|
||||
|
||||
list_for_each_entry(link, &genpd->slave_links, slave_node) {
|
||||
genpd_sd_counter_dec(link->master);
|
||||
list_for_each_entry(link, &genpd->child_links, child_node) {
|
||||
genpd_sd_counter_dec(link->parent);
|
||||
|
||||
if (use_lock)
|
||||
genpd_lock_nested(link->master, depth + 1);
|
||||
genpd_lock_nested(link->parent, depth + 1);
|
||||
|
||||
genpd_sync_power_off(link->master, use_lock, depth + 1);
|
||||
genpd_sync_power_off(link->parent, use_lock, depth + 1);
|
||||
|
||||
if (use_lock)
|
||||
genpd_unlock(link->master);
|
||||
genpd_unlock(link->parent);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* genpd_sync_power_on - Synchronously power on a PM domain and its masters.
|
||||
* genpd_sync_power_on - Synchronously power on a PM domain and its parents.
|
||||
* @genpd: PM domain to power on.
|
||||
* @use_lock: use the lock.
|
||||
* @depth: nesting count for lockdep.
|
||||
@ -994,16 +994,16 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
|
||||
if (genpd_status_on(genpd))
|
||||
return;
|
||||
|
||||
list_for_each_entry(link, &genpd->slave_links, slave_node) {
|
||||
genpd_sd_counter_inc(link->master);
|
||||
list_for_each_entry(link, &genpd->child_links, child_node) {
|
||||
genpd_sd_counter_inc(link->parent);
|
||||
|
||||
if (use_lock)
|
||||
genpd_lock_nested(link->master, depth + 1);
|
||||
genpd_lock_nested(link->parent, depth + 1);
|
||||
|
||||
genpd_sync_power_on(link->master, use_lock, depth + 1);
|
||||
genpd_sync_power_on(link->parent, use_lock, depth + 1);
|
||||
|
||||
if (use_lock)
|
||||
genpd_unlock(link->master);
|
||||
genpd_unlock(link->parent);
|
||||
}
|
||||
|
||||
_genpd_power_on(genpd, false);
|
||||
@ -1443,12 +1443,12 @@ static void genpd_update_cpumask(struct generic_pm_domain *genpd,
|
||||
if (!genpd_is_cpu_domain(genpd))
|
||||
return;
|
||||
|
||||
list_for_each_entry(link, &genpd->slave_links, slave_node) {
|
||||
struct generic_pm_domain *master = link->master;
|
||||
list_for_each_entry(link, &genpd->child_links, child_node) {
|
||||
struct generic_pm_domain *parent = link->parent;
|
||||
|
||||
genpd_lock_nested(master, depth + 1);
|
||||
genpd_update_cpumask(master, cpu, set, depth + 1);
|
||||
genpd_unlock(master);
|
||||
genpd_lock_nested(parent, depth + 1);
|
||||
genpd_update_cpumask(parent, cpu, set, depth + 1);
|
||||
genpd_unlock(parent);
|
||||
}
|
||||
|
||||
if (set)
|
||||
@ -1636,17 +1636,17 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(itr, &genpd->master_links, master_node) {
|
||||
if (itr->slave == subdomain && itr->master == genpd) {
|
||||
list_for_each_entry(itr, &genpd->parent_links, parent_node) {
|
||||
if (itr->child == subdomain && itr->parent == genpd) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
link->master = genpd;
|
||||
list_add_tail(&link->master_node, &genpd->master_links);
|
||||
link->slave = subdomain;
|
||||
list_add_tail(&link->slave_node, &subdomain->slave_links);
|
||||
link->parent = genpd;
|
||||
list_add_tail(&link->parent_node, &genpd->parent_links);
|
||||
link->child = subdomain;
|
||||
list_add_tail(&link->child_node, &subdomain->child_links);
|
||||
if (genpd_status_on(subdomain))
|
||||
genpd_sd_counter_inc(genpd);
|
||||
|
||||
@ -1660,7 +1660,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
|
||||
|
||||
/**
|
||||
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
|
||||
* @genpd: Master PM domain to add the subdomain to.
|
||||
* @genpd: Leader PM domain to add the subdomain to.
|
||||
* @subdomain: Subdomain to be added.
|
||||
*/
|
||||
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
|
||||
@ -1678,7 +1678,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
|
||||
|
||||
/**
|
||||
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
|
||||
* @genpd: Master PM domain to remove the subdomain from.
|
||||
* @genpd: Leader PM domain to remove the subdomain from.
|
||||
* @subdomain: Subdomain to be removed.
|
||||
*/
|
||||
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
|
||||
@ -1693,19 +1693,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
|
||||
genpd_lock(subdomain);
|
||||
genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
|
||||
|
||||
if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
|
||||
if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
|
||||
pr_warn("%s: unable to remove subdomain %s\n",
|
||||
genpd->name, subdomain->name);
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
|
||||
if (link->slave != subdomain)
|
||||
list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
|
||||
if (link->child != subdomain)
|
||||
continue;
|
||||
|
||||
list_del(&link->master_node);
|
||||
list_del(&link->slave_node);
|
||||
list_del(&link->parent_node);
|
||||
list_del(&link->child_node);
|
||||
kfree(link);
|
||||
if (genpd_status_on(subdomain))
|
||||
genpd_sd_counter_dec(genpd);
|
||||
@ -1770,8 +1770,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
|
||||
if (IS_ERR_OR_NULL(genpd))
|
||||
return -EINVAL;
|
||||
|
||||
INIT_LIST_HEAD(&genpd->master_links);
|
||||
INIT_LIST_HEAD(&genpd->slave_links);
|
||||
INIT_LIST_HEAD(&genpd->parent_links);
|
||||
INIT_LIST_HEAD(&genpd->child_links);
|
||||
INIT_LIST_HEAD(&genpd->dev_list);
|
||||
genpd_lock_init(genpd);
|
||||
genpd->gov = gov;
|
||||
@ -1848,15 +1848,15 @@ static int genpd_remove(struct generic_pm_domain *genpd)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!list_empty(&genpd->master_links) || genpd->device_count) {
|
||||
if (!list_empty(&genpd->parent_links) || genpd->device_count) {
|
||||
genpd_unlock(genpd);
|
||||
pr_err("%s: unable to remove %s\n", __func__, genpd->name);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
|
||||
list_del(&link->master_node);
|
||||
list_del(&link->slave_node);
|
||||
list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
|
||||
list_del(&link->parent_node);
|
||||
list_del(&link->child_node);
|
||||
kfree(link);
|
||||
}
|
||||
|
||||
@ -2827,12 +2827,12 @@ static int genpd_summary_one(struct seq_file *s,
|
||||
|
||||
/*
|
||||
* Modifications on the list require holding locks on both
|
||||
* master and slave, so we are safe.
|
||||
* parent and child, so we are safe.
|
||||
* Also genpd->name is immutable.
|
||||
*/
|
||||
list_for_each_entry(link, &genpd->master_links, master_node) {
|
||||
seq_printf(s, "%s", link->slave->name);
|
||||
if (!list_is_last(&link->master_node, &genpd->master_links))
|
||||
list_for_each_entry(link, &genpd->parent_links, parent_node) {
|
||||
seq_printf(s, "%s", link->child->name);
|
||||
if (!list_is_last(&link->parent_node, &genpd->parent_links))
|
||||
seq_puts(s, ", ");
|
||||
}
|
||||
|
||||
@ -2860,7 +2860,7 @@ static int summary_show(struct seq_file *s, void *data)
|
||||
struct generic_pm_domain *genpd;
|
||||
int ret = 0;
|
||||
|
||||
seq_puts(s, "domain status slaves\n");
|
||||
seq_puts(s, "domain status children\n");
|
||||
seq_puts(s, " /device runtime status\n");
|
||||
seq_puts(s, "----------------------------------------------------------------------\n");
|
||||
|
||||
@ -2915,8 +2915,8 @@ static int sub_domains_show(struct seq_file *s, void *data)
|
||||
if (ret)
|
||||
return -ERESTARTSYS;
|
||||
|
||||
list_for_each_entry(link, &genpd->master_links, master_node)
|
||||
seq_printf(s, "%s\n", link->slave->name);
|
||||
list_for_each_entry(link, &genpd->parent_links, parent_node)
|
||||
seq_printf(s, "%s\n", link->child->name);
|
||||
|
||||
genpd_unlock(genpd);
|
||||
return ret;
|
||||
|
@ -135,8 +135,8 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
|
||||
*
|
||||
* All subdomains have been powered off already at this point.
|
||||
*/
|
||||
list_for_each_entry(link, &genpd->master_links, master_node) {
|
||||
struct generic_pm_domain *sd = link->slave;
|
||||
list_for_each_entry(link, &genpd->parent_links, parent_node) {
|
||||
struct generic_pm_domain *sd = link->child;
|
||||
s64 sd_max_off_ns = sd->max_off_time_ns;
|
||||
|
||||
if (sd_max_off_ns < 0)
|
||||
@ -217,13 +217,13 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
|
||||
}
|
||||
|
||||
/*
|
||||
* We have to invalidate the cached results for the masters, so
|
||||
* We have to invalidate the cached results for the parents, so
|
||||
* use the observation that default_power_down_ok() is not
|
||||
* going to be called for any master until this instance
|
||||
* going to be called for any parent until this instance
|
||||
* returns.
|
||||
*/
|
||||
list_for_each_entry(link, &genpd->slave_links, slave_node)
|
||||
link->master->max_off_time_changed = true;
|
||||
list_for_each_entry(link, &genpd->child_links, child_node)
|
||||
link->parent->max_off_time_changed = true;
|
||||
|
||||
genpd->max_off_time_ns = -1;
|
||||
genpd->max_off_time_changed = false;
|
||||
|
@ -1,6 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* sysfs entries for device PM */
|
||||
#include <linux/device.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/pm_qos.h>
|
||||
@ -739,12 +740,18 @@ int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
|
||||
|
||||
int wakeup_sysfs_add(struct device *dev)
|
||||
{
|
||||
return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
|
||||
int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
|
||||
|
||||
if (!ret)
|
||||
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void wakeup_sysfs_remove(struct device *dev)
|
||||
{
|
||||
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
|
||||
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
|
||||
}
|
||||
|
||||
int pm_qos_sysfs_add_resume_latency(struct device *dev)
|
||||
|
@@ -19,8 +19,8 @@
  * The idle + run duration is specified via separate helpers and that allows
  * idle injection to be started.
  *
- * The idle injection kthreads will call play_idle() with the idle duration
- * specified as per the above.
+ * The idle injection kthreads will call play_idle_precise() with the idle
+ * duration and max allowed latency specified as per the above.
  *
  * After all of them have been woken up, a timer is set to start the next idle
  * injection cycle.
@@ -100,7 +100,7 @@ static void idle_inject_wakeup(struct idle_inject_device *ii_dev)
  *
  * This function is called when the idle injection timer expires. It wakes up
  * idle injection tasks associated with the timer and they, in turn, invoke
- * play_idle() to inject a specified amount of CPU idle time.
+ * play_idle_precise() to inject a specified amount of CPU idle time.
  *
  * Return: HRTIMER_RESTART.
  */
@@ -124,8 +124,8 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
  * idle_inject_fn - idle injection work function
  * @cpu: the CPU owning the task
  *
- * This function calls play_idle() to inject a specified amount of CPU idle
- * time.
+ * This function calls play_idle_precise() to inject a specified amount of CPU
+ * idle time.
  */
 static void idle_inject_fn(unsigned int cpu)
 {
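For context on the comment-only changes above: in this kernel generation play_idle() is believed to be just a thin inline wrapper around play_idle_precise(), roughly as in the sketch below (a from-memory paraphrase, not part of this diff), which is why the comments now name the precise variant directly:

/* include/linux/cpu.h, paraphrased: forward to the precise variant with the
 * duration converted to nanoseconds and no additional latency constraint. */
static inline void play_idle(unsigned long duration_us)
{
	play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
}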
@ -39,6 +39,8 @@
|
||||
#define POWER_HIGH_LOCK BIT_ULL(63)
|
||||
#define POWER_LOW_LOCK BIT(31)
|
||||
|
||||
#define POWER_LIMIT4_MASK 0x1FFF
|
||||
|
||||
#define TIME_WINDOW1_MASK (0x7FULL<<17)
|
||||
#define TIME_WINDOW2_MASK (0x7FULL<<49)
|
||||
|
||||
@ -82,6 +84,7 @@ enum unit_type {
|
||||
|
||||
static const char pl1_name[] = "long_term";
|
||||
static const char pl2_name[] = "short_term";
|
||||
static const char pl4_name[] = "peak_power";
|
||||
|
||||
#define power_zone_to_rapl_domain(_zone) \
|
||||
container_of(_zone, struct rapl_domain, power_zone)
|
||||
@ -93,6 +96,7 @@ struct rapl_defaults {
|
||||
u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
|
||||
bool to_raw);
|
||||
unsigned int dram_domain_energy_unit;
|
||||
unsigned int psys_domain_energy_unit;
|
||||
};
|
||||
static struct rapl_defaults *rapl_defaults;
|
||||
|
||||
@ -337,6 +341,9 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid,
|
||||
case PL2_ENABLE:
|
||||
rapl_write_data_raw(rd, POWER_LIMIT2, power_limit);
|
||||
break;
|
||||
case PL4_ENABLE:
|
||||
rapl_write_data_raw(rd, POWER_LIMIT4, power_limit);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
}
|
||||
@ -371,6 +378,9 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
|
||||
case PL2_ENABLE:
|
||||
prim = POWER_LIMIT2;
|
||||
break;
|
||||
case PL4_ENABLE:
|
||||
prim = POWER_LIMIT4;
|
||||
break;
|
||||
default:
|
||||
put_online_cpus();
|
||||
return -EINVAL;
|
||||
@ -440,6 +450,13 @@ static int get_time_window(struct powercap_zone *power_zone, int cid,
|
||||
case PL2_ENABLE:
|
||||
ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val);
|
||||
break;
|
||||
case PL4_ENABLE:
|
||||
/*
|
||||
* Time window parameter is not applicable for PL4 entry
|
||||
* so assigining '0' as default value.
|
||||
*/
|
||||
val = 0;
|
||||
break;
|
||||
default:
|
||||
put_online_cpus();
|
||||
return -EINVAL;
|
||||
@ -483,6 +500,9 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
|
||||
case PL2_ENABLE:
|
||||
prim = MAX_POWER;
|
||||
break;
|
||||
case PL4_ENABLE:
|
||||
prim = MAX_POWER;
|
||||
break;
|
||||
default:
|
||||
put_online_cpus();
|
||||
return -EINVAL;
|
||||
@ -492,6 +512,10 @@ static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
|
||||
else
|
||||
*data = val;
|
||||
|
||||
/* As a generalization rule, PL4 would be around two times PL2. */
|
||||
if (rd->rpl[id].prim_id == PL4_ENABLE)
|
||||
*data = *data * 2;
|
||||
|
||||
put_online_cpus();
|
||||
|
||||
return ret;
|
||||
@ -524,21 +548,42 @@ static void rapl_init_domains(struct rapl_package *rp)
|
||||
rd->id = i;
|
||||
rd->rpl[0].prim_id = PL1_ENABLE;
|
||||
rd->rpl[0].name = pl1_name;
|
||||
/* some domain may support two power limits */
|
||||
if (rp->priv->limits[i] == 2) {
|
||||
|
||||
/*
|
||||
* The PL2 power domain is applicable for limits two
|
||||
* and limits three
|
||||
*/
|
||||
if (rp->priv->limits[i] >= 2) {
|
||||
rd->rpl[1].prim_id = PL2_ENABLE;
|
||||
rd->rpl[1].name = pl2_name;
|
||||
}
|
||||
|
||||
/* Enable PL4 domain if the total power limits are three */
|
||||
if (rp->priv->limits[i] == 3) {
|
||||
rd->rpl[2].prim_id = PL4_ENABLE;
|
||||
rd->rpl[2].name = pl4_name;
|
||||
}
|
||||
|
||||
for (j = 0; j < RAPL_DOMAIN_REG_MAX; j++)
|
||||
rd->regs[j] = rp->priv->regs[i][j];
|
||||
|
||||
if (i == RAPL_DOMAIN_DRAM) {
|
||||
switch (i) {
|
||||
case RAPL_DOMAIN_DRAM:
|
||||
rd->domain_energy_unit =
|
||||
rapl_defaults->dram_domain_energy_unit;
|
||||
if (rd->domain_energy_unit)
|
||||
pr_info("DRAM domain energy unit %dpj\n",
|
||||
rd->domain_energy_unit);
|
||||
break;
|
||||
case RAPL_DOMAIN_PLATFORM:
|
||||
rd->domain_energy_unit =
|
||||
rapl_defaults->psys_domain_energy_unit;
|
||||
if (rd->domain_energy_unit)
|
||||
pr_info("Platform domain energy unit %dpj\n",
|
||||
rd->domain_energy_unit);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
rd++;
|
||||
}
|
||||
@ -587,6 +632,8 @@ static struct rapl_primitive_info rpi[] = {
|
||||
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
|
||||
PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
|
||||
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
|
||||
PRIMITIVE_INFO_INIT(POWER_LIMIT4, POWER_LIMIT4_MASK, 0,
|
||||
RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0),
|
||||
PRIMITIVE_INFO_INIT(FW_LOCK, POWER_LOW_LOCK, 31,
|
||||
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
|
||||
PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
|
||||
@ -597,6 +644,8 @@ static struct rapl_primitive_info rpi[] = {
|
||||
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
|
||||
PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
|
||||
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
|
||||
PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0,
|
||||
RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
|
||||
PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
|
||||
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
|
||||
PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
|
||||
@ -919,6 +968,14 @@ static const struct rapl_defaults rapl_defaults_hsw_server = {
|
||||
.dram_domain_energy_unit = 15300,
|
||||
};
|
||||
|
||||
static const struct rapl_defaults rapl_defaults_spr_server = {
|
||||
.check_unit = rapl_check_unit_core,
|
||||
.set_floor_freq = set_floor_freq_default,
|
||||
.compute_time_window = rapl_compute_time_window_core,
|
||||
.dram_domain_energy_unit = 15300,
|
||||
.psys_domain_energy_unit = 1000000000,
|
||||
};
|
||||
|
||||
static const struct rapl_defaults rapl_defaults_byt = {
|
||||
.floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_BYT,
|
||||
.check_unit = rapl_check_unit_atom,
|
||||
@ -978,6 +1035,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
|
||||
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &rapl_defaults_core),
|
||||
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &rapl_defaults_core),
|
||||
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &rapl_defaults_core),
|
||||
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
|
||||
|
||||
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
|
||||
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &rapl_defaults_cht),
|
||||
@ -1252,6 +1310,7 @@ void rapl_remove_package(struct rapl_package *rp)
|
||||
if (find_nr_power_limit(rd) > 1) {
|
||||
rapl_write_data_raw(rd, PL2_ENABLE, 0);
|
||||
rapl_write_data_raw(rd, PL2_CLAMP, 0);
|
||||
rapl_write_data_raw(rd, PL4_ENABLE, 0);
|
||||
}
|
||||
if (rd->id == RAPL_DOMAIN_PACKAGE) {
|
||||
rd_package = rd;
|
||||
@ -1360,6 +1419,13 @@ static void power_limit_state_save(void)
|
||||
if (ret)
|
||||
rd->rpl[i].last_power_limit = 0;
|
||||
break;
|
||||
case PL4_ENABLE:
|
||||
ret = rapl_read_data_raw(rd,
|
||||
POWER_LIMIT4, true,
|
||||
&rd->rpl[i].last_power_limit);
|
||||
if (ret)
|
||||
rd->rpl[i].last_power_limit = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1390,6 +1456,11 @@ static void power_limit_state_restore(void)
|
||||
rapl_write_data_raw(rd, POWER_LIMIT2,
|
||||
rd->rpl[i].last_power_limit);
|
||||
break;
|
||||
case PL4_ENABLE:
|
||||
if (rd->rpl[i].last_power_limit)
|
||||
rapl_write_data_raw(rd, POWER_LIMIT4,
|
||||
rd->rpl[i].last_power_limit);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -28,6 +28,7 @@
|
||||
|
||||
/* Local defines */
|
||||
#define MSR_PLATFORM_POWER_LIMIT 0x0000065C
|
||||
#define MSR_VR_CURRENT_CONFIG 0x00000601
|
||||
|
||||
/* private data for RAPL MSR Interface */
|
||||
static struct rapl_if_priv rapl_msr_priv = {
|
||||
@ -123,13 +124,27 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
|
||||
return ra->err;
|
||||
}
|
||||
|
||||
/* List of verified CPUs. */
|
||||
static const struct x86_cpu_id pl4_support_ids[] = {
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY },
|
||||
{}
|
||||
};
|
||||
|
||||
static int rapl_msr_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct x86_cpu_id *id = x86_match_cpu(pl4_support_ids);
|
||||
int ret;
|
||||
|
||||
rapl_msr_priv.read_raw = rapl_msr_read_raw;
|
||||
rapl_msr_priv.write_raw = rapl_msr_write_raw;
|
||||
|
||||
if (id) {
|
||||
rapl_msr_priv.limits[RAPL_DOMAIN_PACKAGE] = 3;
|
||||
rapl_msr_priv.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4] =
|
||||
MSR_VR_CURRENT_CONFIG;
|
||||
pr_info("PL4 support detected.\n");
|
||||
}
|
||||
|
||||
rapl_msr_priv.control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
|
||||
if (IS_ERR(rapl_msr_priv.control_type)) {
|
||||
pr_debug("failed to register powercap control_type.\n");
|
||||
|
@@ -414,7 +414,7 @@ static int nfs4_delay_interruptible(long *timeout)
 {
 	might_sleep();
 
-	freezable_schedule_timeout_interruptible(nfs4_update_delay(timeout));
+	freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout));
 	if (!signal_pending(current))
 		return 0;
 	return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS;
@@ -207,6 +207,17 @@ static inline long freezable_schedule_timeout_interruptible(long timeout)
 	return __retval;
 }
 
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
+{
+	long __retval;
+
+	freezer_do_not_count();
+	__retval = schedule_timeout_interruptible(timeout);
+	freezer_count_unsafe();
+	return __retval;
+}
+
 /* Like schedule_timeout_killable(), but should not block the freezer. */
 static inline long freezable_schedule_timeout_killable(long timeout)
 {
@@ -285,6 +296,9 @@ static inline void set_freezable(void) {}
 #define freezable_schedule_timeout_interruptible(timeout)	\
 	schedule_timeout_interruptible(timeout)
 
+#define freezable_schedule_timeout_interruptible_unsafe(timeout)	\
+	schedule_timeout_interruptible(timeout)
+
 #define freezable_schedule_timeout_killable(timeout)		\
 	schedule_timeout_killable(timeout)
 
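The nfs4_delay_interruptible() hunk above is the one intended user of the new helper; as a minimal sketch of what such a call site looks like (the function name and timeout below are invented for illustration, not taken from the patch):

#include <linux/freezer.h>
#include <linux/jiffies.h>

/* Hypothetical retry delay: sleep interruptibly without blocking the freezer.
 * The _unsafe variant pairs freezer_do_not_count() with freezer_count_unsafe(),
 * skipping the extra checking done on the normal path, hence the
 * "DO NOT ADD ANY NEW CALLERS" warning in the header. */
static long example_retry_delay(void)
{
	return freezable_schedule_timeout_interruptible_unsafe(HZ / 10);
}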
@@ -29,6 +29,7 @@ enum rapl_domain_reg_id {
 	RAPL_DOMAIN_REG_PERF,
 	RAPL_DOMAIN_REG_POLICY,
 	RAPL_DOMAIN_REG_INFO,
+	RAPL_DOMAIN_REG_PL4,
 	RAPL_DOMAIN_REG_MAX,
 };
 
@@ -38,12 +39,14 @@ enum rapl_primitives {
 	ENERGY_COUNTER,
 	POWER_LIMIT1,
 	POWER_LIMIT2,
+	POWER_LIMIT4,
 	FW_LOCK,
 
 	PL1_ENABLE,	/* power limit 1, aka long term */
 	PL1_CLAMP,	/* allow frequency to go below OS request */
 	PL2_ENABLE,	/* power limit 2, aka short term, instantaneous */
 	PL2_CLAMP,
+	PL4_ENABLE,	/* power limit 4, aka max peak power */
 
 	TIME_WINDOW1,	/* long term */
 	TIME_WINDOW2,	/* short term */
@@ -65,7 +68,7 @@ struct rapl_domain_data {
 	unsigned long timestamp;
 };
 
-#define NR_POWER_LIMITS (2)
+#define NR_POWER_LIMITS (3)
 struct rapl_power_limit {
 	struct powercap_zone_constraint *constraint;
 	int prim_id;	/* primitive ID used to enable */
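Tying this header change to the rapl_init_domains() hunk earlier in the diff: with NR_POWER_LIMITS raised to 3, a domain's rpl[] array can now carry all three limits. An illustrative initializer (not code from the patch; the array name is made up):

/* Index assignments as wired up in rapl_init_domains() above. */
static const struct rapl_power_limit example_rpl[NR_POWER_LIMITS] = {
	[0] = { .prim_id = PL1_ENABLE, .name = "long_term"  },	/* POWER_LIMIT1 */
	[1] = { .prim_id = PL2_ENABLE, .name = "short_term" },	/* POWER_LIMIT2 */
	[2] = { .prim_id = PL4_ENABLE, .name = "peak_power" },	/* POWER_LIMIT4 */
};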
@@ -95,8 +95,8 @@ struct generic_pm_domain {
 	struct device dev;
 	struct dev_pm_domain domain;	/* PM domain operations */
 	struct list_head gpd_list_node;	/* Node in the global PM domains list */
-	struct list_head master_links;	/* Links with PM domain as a master */
-	struct list_head slave_links;	/* Links with PM domain as a slave */
+	struct list_head parent_links;	/* Links with PM domain as a parent */
+	struct list_head child_links;	/* Links with PM domain as a child */
 	struct list_head dev_list;	/* List of devices */
 	struct dev_power_governor *gov;
 	struct work_struct power_off_work;
@@ -151,10 +151,10 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
 }
 
 struct gpd_link {
-	struct generic_pm_domain *master;
-	struct list_head master_node;
-	struct generic_pm_domain *slave;
-	struct list_head slave_node;
+	struct generic_pm_domain *parent;
+	struct list_head parent_node;
+	struct generic_pm_domain *child;
+	struct list_head child_node;
 
 	/* Sub-domain's per-master domain performance state */
 	unsigned int performance_state;
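To make the direction of the renamed lists concrete, here is a small sketch (the helper name is invented and locking is omitted for brevity) of how a domain's links are walked with the new field names:

#include <linux/pm_domain.h>

static void example_print_links(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	/* Links where this domain is the parent: link->child is a subdomain. */
	list_for_each_entry(link, &genpd->parent_links, parent_node)
		pr_info("%s -> child %s\n", genpd->name, link->child->name);

	/* Links where this domain is the child: link->parent is a parent domain. */
	list_for_each_entry(link, &genpd->child_links, child_node)
		pr_info("%s -> parent %s\n", genpd->name, link->parent->name);
}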
@ -1062,7 +1062,7 @@ power_attr(disk);
|
||||
static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return sprintf(buf,"%d:%d\n", MAJOR(swsusp_resume_device),
|
||||
return sprintf(buf, "%d:%d\n", MAJOR(swsusp_resume_device),
|
||||
MINOR(swsusp_resume_device));
|
||||
}
|
||||
|
||||
@ -1162,7 +1162,7 @@ static ssize_t reserved_size_store(struct kobject *kobj,
|
||||
|
||||
power_attr(reserved_size);
|
||||
|
||||
static struct attribute * g[] = {
|
||||
static struct attribute *g[] = {
|
||||
&disk_attr.attr,
|
||||
&resume_offset_attr.attr,
|
||||
&resume_attr.attr,
|
||||
@ -1190,7 +1190,7 @@ static int __init resume_setup(char *str)
|
||||
if (noresume)
|
||||
return 1;
|
||||
|
||||
strncpy( resume_file, str, 255 );
|
||||
strncpy(resume_file, str, 255);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -32,7 +32,7 @@ static inline int init_header_complete(struct swsusp_info *info)
|
||||
return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
|
||||
}
|
||||
|
||||
static inline char *check_image_kernel(struct swsusp_info *info)
|
||||
static inline const char *check_image_kernel(struct swsusp_info *info)
|
||||
{
|
||||
return arch_hibernation_header_restore(info) ?
|
||||
"architecture specific data" : NULL;
|
||||
|
@ -2023,7 +2023,7 @@ static int init_header_complete(struct swsusp_info *info)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static char *check_image_kernel(struct swsusp_info *info)
|
||||
static const char *check_image_kernel(struct swsusp_info *info)
|
||||
{
|
||||
if (info->version_code != LINUX_VERSION_CODE)
|
||||
return "kernel version";
|
||||
@ -2176,7 +2176,7 @@ static void mark_unsafe_pages(struct memory_bitmap *bm)
|
||||
|
||||
static int check_header(struct swsusp_info *info)
|
||||
{
|
||||
char *reason;
|
||||
const char *reason;
|
||||
|
||||
reason = check_image_kernel(info);
|
||||
if (!reason && info->num_physpages != get_num_physpages())
|
||||
|
@ -49,17 +49,17 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
|
||||
else:
|
||||
status_string = 'off-{}'.format(genpd['state_idx'])
|
||||
|
||||
slave_names = []
|
||||
child_names = []
|
||||
for link in list_for_each_entry(
|
||||
genpd['master_links'],
|
||||
genpd['parent_links'],
|
||||
device_link_type.get_type().pointer(),
|
||||
'master_node'):
|
||||
slave_names.apend(link['slave']['name'])
|
||||
'parent_node'):
|
||||
child_names.append(link['child']['name'])
|
||||
|
||||
gdb.write('%-30s %-15s %s\n' % (
|
||||
genpd['name'].string(),
|
||||
status_string,
|
||||
', '.join(slave_names)))
|
||||
', '.join(child_names)))
|
||||
|
||||
# Print devices in domain
|
||||
for pm_data in list_for_each_entry(genpd['dev_list'],
|
||||
@ -70,7 +70,7 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
|
||||
gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev)))
|
||||
|
||||
def invoke(self, arg, from_tty):
|
||||
gdb.write('domain status slaves\n');
|
||||
gdb.write('domain status children\n');
|
||||
gdb.write(' /device runtime status\n');
|
||||
gdb.write('----------------------------------------------------------------------\n');
|
||||
for genpd in list_for_each_entry(
|
||||
|
@ -285,7 +285,7 @@ struct cpufreq_available_governors *cpufreq_get_available_governors(unsigned
|
||||
} else {
|
||||
first = malloc(sizeof(*first));
|
||||
if (!first)
|
||||
goto error_out;
|
||||
return NULL;
|
||||
current = first;
|
||||
}
|
||||
current->first = first;
|
||||
@ -362,7 +362,7 @@ struct cpufreq_available_frequencies
|
||||
} else {
|
||||
first = malloc(sizeof(*first));
|
||||
if (!first)
|
||||
goto error_out;
|
||||
return NULL;
|
||||
current = first;
|
||||
}
|
||||
current->first = first;
|
||||
@ -418,7 +418,7 @@ struct cpufreq_available_frequencies
|
||||
} else {
|
||||
first = malloc(sizeof(*first));
|
||||
if (!first)
|
||||
goto error_out;
|
||||
return NULL;
|
||||
current = first;
|
||||
}
|
||||
current->first = first;
|
||||
@ -493,7 +493,7 @@ static struct cpufreq_affected_cpus *sysfs_get_cpu_list(unsigned int cpu,
|
||||
} else {
|
||||
first = malloc(sizeof(*first));
|
||||
if (!first)
|
||||
goto error_out;
|
||||
return NULL;
|
||||
current = first;
|
||||
}
|
||||
current->first = first;
|
||||
@ -726,7 +726,7 @@ struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
|
||||
} else {
|
||||
first = malloc(sizeof(*first));
|
||||
if (!first)
|
||||
goto error_out;
|
||||
return NULL;
|
||||
current = first;
|
||||
}
|
||||
current->first = first;
|
||||
|
@ -170,7 +170,7 @@ displayed.
|
||||
|
||||
.SH REFERENCES
|
||||
"BIOS and Kernel Developer’s Guide (BKDG) for AMD Family 14h Processors"
|
||||
http://support.amd.com/us/Processor_TechDocs/43170.pdf
|
||||
https://support.amd.com/us/Processor_TechDocs/43170.pdf
|
||||
|
||||
"Intel® Turbo Boost Technology
|
||||
in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
|
||||
@ -178,7 +178,7 @@ http://download.intel.com/design/processor/applnots/320354.pdf
|
||||
|
||||
"Intel® 64 and IA-32 Architectures Software Developer's Manual
|
||||
Volume 3B: System Programming Guide"
|
||||
http://www.intel.com/products/processor/manuals
|
||||
https://www.intel.com/products/processor/manuals
|
||||
|
||||
.SH FILES
|
||||
.ta
|
||||
|
@ -26,11 +26,11 @@ struct bitmask *bitmask_alloc(unsigned int n)
|
||||
struct bitmask *bmp;
|
||||
|
||||
bmp = malloc(sizeof(*bmp));
|
||||
if (bmp == 0)
|
||||
if (!bmp)
|
||||
return 0;
|
||||
bmp->size = n;
|
||||
bmp->maskp = calloc(longsperbits(n), sizeof(unsigned long));
|
||||
if (bmp->maskp == 0) {
|
||||
if (!bmp->maskp) {
|
||||
free(bmp);
|
||||
return 0;
|
||||
}
|
||||
@ -40,7 +40,7 @@ struct bitmask *bitmask_alloc(unsigned int n)
|
||||
/* Free `struct bitmask` */
|
||||
void bitmask_free(struct bitmask *bmp)
|
||||
{
|
||||
if (bmp == 0)
|
||||
if (!bmp)
|
||||
return;
|
||||
free(bmp->maskp);
|
||||
bmp->maskp = (unsigned long *)0xdeadcdef; /* double free tripwire */
|
||||
|
@ -6,7 +6,7 @@
|
||||
|_| |___/ |_|
|
||||
|
||||
pm-graph: suspend/resume/boot timing analysis tools
|
||||
Version: 5.6
|
||||
Version: 5.7
|
||||
Author: Todd Brandt <todd.e.brandt@intel.com>
|
||||
Home Page: https://01.org/pm-graph
|
||||
|
||||
|
@ -81,7 +81,7 @@ def ascii(text):
|
||||
# store system values and test parameters
|
||||
class SystemValues:
|
||||
title = 'SleepGraph'
|
||||
version = '5.6'
|
||||
version = '5.7'
|
||||
ansi = False
|
||||
rs = 0
|
||||
display = ''
|
||||
@ -198,7 +198,7 @@ class SystemValues:
|
||||
'suspend_console': {},
|
||||
'acpi_pm_prepare': {},
|
||||
'syscore_suspend': {},
|
||||
'arch_thaw_secondary_cpus_end': {},
|
||||
'arch_enable_nonboot_cpus_end': {},
|
||||
'syscore_resume': {},
|
||||
'acpi_pm_finish': {},
|
||||
'resume_console': {},
|
||||
@ -924,10 +924,7 @@ class SystemValues:
|
||||
tp = TestProps()
|
||||
tf = self.openlog(self.ftracefile, 'r')
|
||||
for line in tf:
|
||||
# determine the trace data type (required for further parsing)
|
||||
m = re.match(tp.tracertypefmt, line)
|
||||
if(m):
|
||||
tp.setTracerType(m.group('t'))
|
||||
if tp.stampInfo(line, self):
|
||||
continue
|
||||
# parse only valid lines, if this is not one move on
|
||||
m = re.match(tp.ftrace_line_fmt, line)
|
||||
@ -1244,8 +1241,8 @@ class DevProps:
|
||||
if self.xtraclass:
|
||||
return ' '+self.xtraclass
|
||||
if self.isasync:
|
||||
return ' async_device'
|
||||
return ' sync_device'
|
||||
return ' (async)'
|
||||
return ' (sync)'
|
||||
|
||||
# Class: DeviceNode
|
||||
# Description:
|
||||
@ -1301,6 +1298,7 @@ class Data:
|
||||
'FAIL' : r'(?i).*\bFAILED\b.*',
|
||||
'INVALID' : r'(?i).*\bINVALID\b.*',
|
||||
'CRASH' : r'(?i).*\bCRASHED\b.*',
|
||||
'TIMEOUT' : r'(?i).*\bTIMEOUT\b.*',
|
||||
'IRQ' : r'.*\bgenirq: .*',
|
||||
'TASKFAIL': r'.*Freezing of tasks *.*',
|
||||
'ACPI' : r'.*\bACPI *(?P<b>[A-Za-z]*) *Error[: ].*',
|
||||
@ -1358,11 +1356,11 @@ class Data:
|
||||
if self.dmesg[p]['order'] == order:
|
||||
return p
|
||||
return ''
|
||||
def lastPhase(self):
|
||||
def lastPhase(self, depth=1):
|
||||
plist = self.sortedPhases()
|
||||
if len(plist) < 1:
|
||||
if len(plist) < depth:
|
||||
return ''
|
||||
return plist[-1]
|
||||
return plist[-1*depth]
|
||||
def turbostatInfo(self):
|
||||
tp = TestProps()
|
||||
out = {'syslpi':'N/A','pkgpc10':'N/A'}
|
||||
@ -1382,9 +1380,12 @@ class Data:
|
||||
if len(self.dmesgtext) < 1 and sysvals.dmesgfile:
|
||||
lf = sysvals.openlog(sysvals.dmesgfile, 'r')
|
||||
i = 0
|
||||
tp = TestProps()
|
||||
list = []
|
||||
for line in lf:
|
||||
i += 1
|
||||
if tp.stampInfo(line, sysvals):
|
||||
continue
|
||||
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
|
||||
if not m:
|
||||
continue
|
||||
@ -1400,15 +1401,15 @@ class Data:
|
||||
list.append((msg, err, dir, t, i, i))
|
||||
self.kerror = True
|
||||
break
|
||||
msglist = []
|
||||
tp.msglist = []
|
||||
for msg, type, dir, t, idx1, idx2 in list:
|
||||
msglist.append(msg)
|
||||
tp.msglist.append(msg)
|
||||
self.errorinfo[dir].append((type, t, idx1, idx2))
|
||||
if self.kerror:
|
||||
sysvals.dmesglog = True
|
||||
if len(self.dmesgtext) < 1 and sysvals.dmesgfile:
|
||||
lf.close()
|
||||
return msglist
|
||||
return tp
|
||||
def setStart(self, time, msg=''):
|
||||
self.start = time
|
||||
if msg:
|
||||
@ -1623,6 +1624,8 @@ class Data:
|
||||
if('src' in d):
|
||||
for e in d['src']:
|
||||
e.time = self.trimTimeVal(e.time, t0, dT, left)
|
||||
e.end = self.trimTimeVal(e.end, t0, dT, left)
|
||||
e.length = e.end - e.time
|
||||
for dir in ['suspend', 'resume']:
|
||||
list = []
|
||||
for e in self.errorinfo[dir]:
|
||||
@ -1640,7 +1643,12 @@ class Data:
|
||||
if tL > 0:
|
||||
left = True if tR > tZero else False
|
||||
self.trimTime(tS, tL, left)
|
||||
self.tLow.append('%.0f'%(tL*1000))
|
||||
if 'trying' in self.dmesg[lp] and self.dmesg[lp]['trying'] >= 0.001:
|
||||
tTry = round(self.dmesg[lp]['trying'] * 1000)
|
||||
text = '%.0f (-%.0f waking)' % (tL * 1000, tTry)
|
||||
else:
|
||||
text = '%.0f' % (tL * 1000)
|
||||
self.tLow.append(text)
|
||||
lp = phase
|
||||
def getMemTime(self):
|
||||
if not self.hwstart or not self.hwend:
|
||||
@ -1776,7 +1784,7 @@ class Data:
|
||||
length = -1.0
|
||||
if(start >= 0 and end >= 0):
|
||||
length = end - start
|
||||
if pid == -2:
|
||||
if pid == -2 or name not in sysvals.tracefuncs.keys():
|
||||
i = 2
|
||||
origname = name
|
||||
while(name in list):
|
||||
@ -1789,6 +1797,15 @@ class Data:
|
||||
if color:
|
||||
list[name]['color'] = color
|
||||
return name
|
||||
def findDevice(self, phase, name):
|
||||
list = self.dmesg[phase]['list']
|
||||
mydev = ''
|
||||
for devname in sorted(list):
|
||||
if name == devname or re.match('^%s\[(?P<num>[0-9]*)\]$' % name, devname):
|
||||
mydev = devname
|
||||
if mydev:
|
||||
return list[mydev]
|
||||
return False
|
||||
def deviceChildren(self, devname, phase):
|
||||
devlist = []
|
||||
list = self.dmesg[phase]['list']
|
||||
@ -2779,6 +2796,7 @@ class TestProps:
|
||||
testerrfmt = '^# enter_sleep_error (?P<e>.*)'
|
||||
sysinfofmt = '^# sysinfo .*'
|
||||
cmdlinefmt = '^# command \| (?P<cmd>.*)'
|
||||
kparamsfmt = '^# kparams \| (?P<kp>.*)'
|
||||
devpropfmt = '# Device Properties: .*'
|
||||
pinfofmt = '# platform-(?P<val>[a-z,A-Z,0-9]*): (?P<info>.*)'
|
||||
tracertypefmt = '# tracer: (?P<t>.*)'
|
||||
@ -2790,8 +2808,9 @@ class TestProps:
|
||||
'[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
|
||||
ftrace_line_fmt_nop = \
|
||||
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
|
||||
'(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\
|
||||
'(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
|
||||
'(?P<msg>.*)'
|
||||
machinesuspend = 'machine_suspend\[.*'
|
||||
def __init__(self):
|
||||
self.stamp = ''
|
||||
self.sysinfo = ''
|
||||
@ -2812,16 +2831,13 @@ class TestProps:
|
||||
self.ftrace_line_fmt = self.ftrace_line_fmt_nop
|
||||
else:
|
||||
doError('Invalid tracer format: [%s]' % tracer)
|
||||
def stampInfo(self, line):
|
||||
def stampInfo(self, line, sv):
|
||||
if re.match(self.stampfmt, line):
|
||||
self.stamp = line
|
||||
return True
|
||||
elif re.match(self.sysinfofmt, line):
|
||||
self.sysinfo = line
|
||||
return True
|
||||
elif re.match(self.cmdlinefmt, line):
|
||||
self.cmdline = line
|
||||
return True
|
||||
elif re.match(self.tstatfmt, line):
|
||||
self.turbostat.append(line)
|
||||
return True
|
||||
@ -2834,6 +2850,20 @@ class TestProps:
|
||||
elif re.match(self.firmwarefmt, line):
|
||||
self.fwdata.append(line)
|
||||
return True
|
||||
elif(re.match(self.devpropfmt, line)):
|
||||
self.parseDevprops(line, sv)
|
||||
return True
|
||||
elif(re.match(self.pinfofmt, line)):
|
||||
self.parsePlatformInfo(line, sv)
|
||||
return True
|
||||
m = re.match(self.cmdlinefmt, line)
|
||||
if m:
|
||||
self.cmdline = m.group('cmd')
|
||||
return True
|
||||
m = re.match(self.tracertypefmt, line)
|
||||
if(m):
|
||||
self.setTracerType(m.group('t'))
|
||||
return True
|
||||
return False
|
||||
def parseStamp(self, data, sv):
|
||||
# global test data
|
||||
@ -2858,9 +2888,13 @@ class TestProps:
|
||||
data.stamp[key] = val
|
||||
sv.hostname = data.stamp['host']
|
||||
sv.suspendmode = data.stamp['mode']
|
||||
if sv.suspendmode == 'freeze':
|
||||
self.machinesuspend = 'timekeeping_freeze\[.*'
|
||||
else:
|
||||
self.machinesuspend = 'machine_suspend\[.*'
|
||||
if sv.suspendmode == 'command' and sv.ftracefile != '':
|
||||
modes = ['on', 'freeze', 'standby', 'mem', 'disk']
|
||||
fp = sysvals.openlog(sv.ftracefile, 'r')
|
||||
fp = sv.openlog(sv.ftracefile, 'r')
|
||||
for line in fp:
|
||||
m = re.match('.* machine_suspend\[(?P<mode>.*)\]', line)
|
||||
if m and m.group('mode') in ['1', '2', '3', '4']:
|
||||
@ -2868,9 +2902,7 @@ class TestProps:
|
||||
data.stamp['mode'] = sv.suspendmode
|
||||
break
|
||||
fp.close()
|
||||
m = re.match(self.cmdlinefmt, self.cmdline)
|
||||
if m:
|
||||
sv.cmdline = m.group('cmd')
|
||||
sv.cmdline = self.cmdline
|
||||
if not sv.stamp:
|
||||
sv.stamp = data.stamp
|
||||
# firmware data
|
||||
@ -3052,20 +3084,7 @@ def appendIncompleteTraceLog(testruns):
|
||||
for line in tf:
|
||||
# remove any latent carriage returns
|
||||
line = line.replace('\r\n', '')
|
||||
if tp.stampInfo(line):
|
||||
continue
|
||||
# determine the trace data type (required for further parsing)
|
||||
m = re.match(tp.tracertypefmt, line)
|
||||
if(m):
|
||||
tp.setTracerType(m.group('t'))
|
||||
continue
|
||||
# device properties line
|
||||
if(re.match(tp.devpropfmt, line)):
|
||||
tp.parseDevprops(line, sysvals)
|
||||
continue
|
||||
# platform info line
|
||||
if(re.match(tp.pinfofmt, line)):
|
||||
tp.parsePlatformInfo(line, sysvals)
|
||||
if tp.stampInfo(line, sysvals):
|
||||
continue
|
||||
# parse only valid lines, if this is not one move on
|
||||
m = re.match(tp.ftrace_line_fmt, line)
|
||||
@ -3166,33 +3185,19 @@ def parseTraceLog(live=False):
|
||||
if sysvals.usekprobes:
|
||||
tracewatch += ['sync_filesystems', 'freeze_processes', 'syscore_suspend',
|
||||
'syscore_resume', 'resume_console', 'thaw_processes', 'CPU_ON',
|
||||
'CPU_OFF', 'timekeeping_freeze', 'acpi_suspend']
|
||||
'CPU_OFF', 'acpi_suspend']
|
||||
|
||||
# extract the callgraph and traceevent data
|
||||
s2idle_enter = hwsus = False
|
||||
tp = TestProps()
|
||||
testruns = []
|
||||
testdata = []
|
||||
testrun = 0
|
||||
data, limbo = 0, True
|
||||
testruns, testdata = [], []
|
||||
testrun, data, limbo = 0, 0, True
|
||||
tf = sysvals.openlog(sysvals.ftracefile, 'r')
|
||||
phase = 'suspend_prepare'
|
||||
for line in tf:
|
||||
# remove any latent carriage returns
|
||||
line = line.replace('\r\n', '')
|
||||
if tp.stampInfo(line):
|
||||
continue
|
||||
# tracer type line: determine the trace data type
|
||||
m = re.match(tp.tracertypefmt, line)
|
||||
if(m):
|
||||
tp.setTracerType(m.group('t'))
|
||||
continue
|
||||
# device properties line
|
||||
if(re.match(tp.devpropfmt, line)):
|
||||
tp.parseDevprops(line, sysvals)
|
||||
continue
|
||||
# platform info line
|
||||
if(re.match(tp.pinfofmt, line)):
|
||||
tp.parsePlatformInfo(line, sysvals)
|
||||
if tp.stampInfo(line, sysvals):
|
||||
continue
|
||||
# ignore all other commented lines
|
||||
if line[0] == '#':
|
||||
@ -3303,16 +3308,29 @@ def parseTraceLog(live=False):
|
||||
phase = data.setPhase('suspend_noirq', t.time, isbegin)
|
||||
continue
|
||||
# suspend_machine/resume_machine
|
||||
elif(re.match('machine_suspend\[.*', t.name)):
|
||||
elif(re.match(tp.machinesuspend, t.name)):
|
||||
lp = data.lastPhase()
|
||||
if(isbegin):
|
||||
lp = data.lastPhase()
|
||||
hwsus = True
|
||||
if lp.startswith('resume_machine'):
|
||||
data.dmesg[lp]['end'] = t.time
|
||||
# trim out s2idle loops, track time trying to freeze
|
||||
llp = data.lastPhase(2)
|
||||
if llp.startswith('suspend_machine'):
|
||||
if 'trying' not in data.dmesg[llp]:
|
||||
data.dmesg[llp]['trying'] = 0
|
||||
data.dmesg[llp]['trying'] += \
|
||||
t.time - data.dmesg[lp]['start']
|
||||
data.currphase = ''
|
||||
del data.dmesg[lp]
|
||||
continue
|
||||
phase = data.setPhase('suspend_machine', data.dmesg[lp]['end'], True)
|
||||
data.setPhase(phase, t.time, False)
|
||||
if data.tSuspended == 0:
|
||||
data.tSuspended = t.time
|
||||
else:
|
||||
if lp.startswith('resume_machine'):
|
||||
data.dmesg[lp]['end'] = t.time
|
||||
continue
|
||||
phase = data.setPhase('resume_machine', t.time, True)
|
||||
if(sysvals.suspendmode in ['mem', 'disk']):
|
||||
susp = phase.replace('resume', 'suspend')
|
||||
@ -3343,6 +3361,19 @@ def parseTraceLog(live=False):
|
||||
# global events (outside device calls) are graphed
|
||||
if(name not in testrun.ttemp):
|
||||
testrun.ttemp[name] = []
|
||||
# special handling for s2idle_enter
|
||||
if name == 'machine_suspend':
|
||||
if hwsus:
|
||||
s2idle_enter = hwsus = False
|
||||
elif s2idle_enter and not isbegin:
|
||||
if(len(testrun.ttemp[name]) > 0):
|
||||
testrun.ttemp[name][-1]['end'] = t.time
|
||||
testrun.ttemp[name][-1]['loop'] += 1
|
||||
elif not s2idle_enter and isbegin:
|
||||
s2idle_enter = True
|
||||
testrun.ttemp[name].append({'begin': t.time,
|
||||
'end': t.time, 'pid': pid, 'loop': 0})
|
||||
continue
|
||||
if(isbegin):
|
||||
# create a new list entry
|
||||
testrun.ttemp[name].append(\
|
||||
@ -3374,9 +3405,8 @@ def parseTraceLog(live=False):
|
||||
if(not m):
|
||||
continue
|
||||
n = m.group('d')
|
||||
list = data.dmesg[phase]['list']
|
||||
if(n in list):
|
||||
dev = list[n]
|
||||
dev = data.findDevice(phase, n)
|
||||
if dev:
|
||||
dev['length'] = t.time - dev['start']
|
||||
dev['end'] = t.time
|
||||
# kprobe event processing
|
||||
@ -3479,7 +3509,12 @@ def parseTraceLog(live=False):
|
||||
# add actual trace funcs
|
||||
for name in sorted(test.ttemp):
|
||||
for event in test.ttemp[name]:
|
||||
data.newActionGlobal(name, event['begin'], event['end'], event['pid'])
|
||||
if event['end'] - event['begin'] <= 0:
|
||||
continue
|
||||
title = name
|
||||
if name == 'machine_suspend' and 'loop' in event:
|
||||
title = 's2idle_enter_%dx' % event['loop']
|
||||
data.newActionGlobal(title, event['begin'], event['end'], event['pid'])
|
||||
# add the kprobe based virtual tracefuncs as actual devices
|
||||
for key in sorted(tp.ktemp):
|
||||
name, pid = key
|
||||
@ -3548,8 +3583,9 @@ def parseTraceLog(live=False):
|
||||
for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
|
||||
if p not in data.dmesg:
|
||||
if not terr:
|
||||
pprint('TEST%s FAILED: %s failed in %s phase' % (tn, sysvals.suspendmode, lp))
|
||||
terr = '%s%s failed in %s phase' % (sysvals.suspendmode, tn, lp)
|
||||
ph = p if 'machine' in p else lp
|
||||
terr = '%s%s failed in %s phase' % (sysvals.suspendmode, tn, ph)
|
||||
pprint('TEST%s FAILED: %s' % (tn, terr))
|
||||
error.append(terr)
|
||||
if data.tSuspended == 0:
|
||||
data.tSuspended = data.dmesg[lp]['end']
|
||||
@ -3611,7 +3647,7 @@ def loadKernelLog():
|
||||
idx = line.find('[')
|
||||
if idx > 1:
|
||||
line = line[idx:]
|
||||
if tp.stampInfo(line):
|
||||
if tp.stampInfo(line, sysvals):
|
||||
continue
|
||||
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
|
||||
if(not m):
|
||||
@ -3959,18 +3995,20 @@ def addCallgraphs(sv, hf, data):
|
||||
if sv.cgphase and p != sv.cgphase:
|
||||
continue
|
||||
list = data.dmesg[p]['list']
|
||||
for devname in data.sortedDevices(p):
|
||||
if len(sv.cgfilter) > 0 and devname not in sv.cgfilter:
|
||||
for d in data.sortedDevices(p):
|
||||
if len(sv.cgfilter) > 0 and d not in sv.cgfilter:
|
||||
continue
|
||||
dev = list[devname]
|
||||
dev = list[d]
|
||||
color = 'white'
|
||||
if 'color' in data.dmesg[p]:
|
||||
color = data.dmesg[p]['color']
|
||||
if 'color' in dev:
|
||||
color = dev['color']
|
||||
name = devname
|
||||
if(devname in sv.devprops):
|
||||
name = sv.devprops[devname].altName(devname)
|
||||
name = d if '[' not in d else d.split('[')[0]
|
||||
if(d in sv.devprops):
|
||||
name = sv.devprops[d].altName(d)
|
||||
if 'drv' in dev and dev['drv']:
|
||||
name += ' {%s}' % dev['drv']
|
||||
if sv.suspendmode in suspendmodename:
|
||||
name += ' '+p
|
||||
if('ftrace' in dev):
|
||||
@ -4517,12 +4555,9 @@ def createHTML(testruns, testfail):
|
||||
# draw the devices for this phase
|
||||
phaselist = data.dmesg[b]['list']
|
||||
for d in sorted(data.tdevlist[b]):
|
||||
name = d
|
||||
drv = ''
|
||||
dev = phaselist[d]
|
||||
xtraclass = ''
|
||||
xtrainfo = ''
|
||||
xtrastyle = ''
|
||||
dname = d if '[' not in d else d.split('[')[0]
|
||||
name, dev = dname, phaselist[d]
|
||||
drv = xtraclass = xtrainfo = xtrastyle = ''
|
||||
if 'htmlclass' in dev:
|
||||
xtraclass = dev['htmlclass']
|
||||
if 'color' in dev:
|
||||
@ -4553,7 +4588,7 @@ def createHTML(testruns, testfail):
|
||||
title += b
|
||||
devtl.html += devtl.html_device.format(dev['id'], \
|
||||
title, left, top, '%.3f'%rowheight, width, \
|
||||
d+drv, xtraclass, xtrastyle)
|
||||
dname+drv, xtraclass, xtrastyle)
|
||||
if('cpuexec' in dev):
|
||||
for t in sorted(dev['cpuexec']):
|
||||
start, end = t
|
||||
@ -4571,6 +4606,8 @@ def createHTML(testruns, testfail):
|
||||
continue
|
||||
# draw any trace events for this device
|
||||
for e in dev['src']:
|
||||
if e.length == 0:
|
||||
continue
|
||||
height = '%.3f' % devtl.rowH
|
||||
top = '%.3f' % (rowtop + devtl.scaleH + (e.row*devtl.rowH))
|
||||
left = '%f' % (((e.time-m0)*100)/mTotal)
|
||||
@ -5876,7 +5913,7 @@ def getArgFloat(name, args, min, max, main=True):
|
||||
|
||||
def processData(live=False, quiet=False):
|
||||
if not quiet:
|
||||
pprint('PROCESSING DATA')
|
||||
pprint('PROCESSING: %s' % sysvals.htmlfile)
|
||||
sysvals.vprint('usetraceevents=%s, usetracemarkers=%s, usekprobes=%s' % \
|
||||
(sysvals.usetraceevents, sysvals.usetracemarkers, sysvals.usekprobes))
|
||||
error = ''
|
||||
@ -5928,7 +5965,7 @@ def processData(live=False, quiet=False):
|
||||
sysvals.vprint('Creating the html timeline (%s)...' % sysvals.htmlfile)
|
||||
createHTML(testruns, error)
|
||||
if not quiet:
|
||||
pprint('DONE')
|
||||
pprint('DONE: %s' % sysvals.htmlfile)
|
||||
data = testruns[0]
|
||||
stamp = data.stamp
|
||||
stamp['suspend'], stamp['resume'] = data.getTimeValues()
|
||||
@ -5984,25 +6021,27 @@ def runTest(n=0, quiet=False):
|
||||
return 0
|
||||
|
||||
def find_in_html(html, start, end, firstonly=True):
|
||||
n, cnt, out = 0, len(html), []
|
||||
while n < cnt:
|
||||
e = cnt if (n + 10000 > cnt or n == 0) else n + 10000
|
||||
m = re.search(start, html[n:e])
|
||||
cnt, out, list = len(html), [], []
|
||||
if firstonly:
|
||||
m = re.search(start, html)
|
||||
if m:
|
||||
list.append(m)
|
||||
else:
|
||||
list = re.finditer(start, html)
|
||||
for match in list:
|
||||
s = match.end()
|
||||
e = cnt if (len(out) < 1 or s + 10000 > cnt) else s + 10000
|
||||
m = re.search(end, html[s:e])
|
||||
if not m:
|
||||
break
|
||||
i = m.end()
|
||||
m = re.search(end, html[n+i:e])
|
||||
if not m:
|
||||
break
|
||||
j = m.start()
|
||||
str = html[n+i:n+i+j]
|
||||
e = s + m.start()
|
||||
str = html[s:e]
|
||||
if end == 'ms':
|
||||
num = re.search(r'[-+]?\d*\.\d+|\d+', str)
|
||||
str = num.group() if num else 'NaN'
|
||||
if firstonly:
|
||||
return str
|
||||
out.append(str)
|
||||
n += i+j
|
||||
if firstonly:
|
||||
return ''
|
||||
return out
|
||||
@ -6034,7 +6073,7 @@ def data_from_html(file, outpath, issues, fulldetail=False):
|
||||
else:
|
||||
result = 'pass'
|
||||
# extract error info
|
||||
ilist = []
|
||||
tp, ilist = False, []
|
||||
extra = dict()
|
||||
log = find_in_html(html, '<div id="dmesglog" style="display:none;">',
|
||||
'</div>').strip()
|
||||
@ -6042,8 +6081,8 @@ def data_from_html(file, outpath, issues, fulldetail=False):
|
||||
d = Data(0)
|
||||
d.end = 999999999
|
||||
d.dmesgtext = log.split('\n')
|
||||
msglist = d.extractErrorInfo()
|
||||
for msg in msglist:
|
||||
tp = d.extractErrorInfo()
|
||||
for msg in tp.msglist:
|
||||
sysvals.errorSummary(issues, msg)
|
||||
if stmp[2] == 'freeze':
|
||||
extra = d.turbostatInfo()
|
||||
@ -6059,8 +6098,8 @@ def data_from_html(file, outpath, issues, fulldetail=False):
|
||||
if wifi:
|
||||
extra['wifi'] = wifi
|
||||
low = find_in_html(html, 'freeze time: <b>', ' ms</b>')
|
||||
if low and '|' in low:
|
||||
issue = 'FREEZEx%d' % len(low.split('|'))
|
||||
if low and 'waking' in low:
|
||||
issue = 'FREEZEWAKE'
|
||||
match = [i for i in issues if i['match'] == issue]
|
||||
if len(match) > 0:
|
||||
match[0]['count'] += 1
|
||||
@ -6126,6 +6165,11 @@ def data_from_html(file, outpath, issues, fulldetail=False):
|
||||
data[key] = extra[key]
|
||||
if fulldetail:
|
||||
data['funclist'] = find_in_html(html, '<div title="', '" class="traceevent"', False)
|
||||
if tp:
|
||||
for arg in ['-multi ', '-info ']:
|
||||
if arg in tp.cmdline:
|
||||
data['target'] = tp.cmdline[tp.cmdline.find(arg):].split()[1]
|
||||
break
|
||||
return data
|
||||
|
||||
def genHtml(subdir, force=False):
|
||||
@ -6155,8 +6199,7 @@ def runSummary(subdir, local=True, genhtml=False):
|
||||
pprint('Generating a summary of folder:\n %s' % inpath)
|
||||
if genhtml:
|
||||
genHtml(subdir)
|
||||
issues = []
|
||||
testruns = []
|
||||
target, issues, testruns = '', [], []
|
||||
desc = {'host':[],'mode':[],'kernel':[]}
|
||||
for dirname, dirnames, filenames in os.walk(subdir):
|
||||
for filename in filenames:
|
||||
@ -6165,6 +6208,8 @@ def runSummary(subdir, local=True, genhtml=False):
|
||||
data = data_from_html(os.path.join(dirname, filename), outpath, issues)
|
||||
if(not data):
|
||||
continue
|
||||
if 'target' in data:
|
||||
target = data['target']
|
||||
testruns.append(data)
|
||||
for key in desc:
|
||||
if data[key] not in desc[key]:
|
||||
@ -6172,6 +6217,8 @@ def runSummary(subdir, local=True, genhtml=False):
|
||||
pprint('Summary files:')
|
||||
if len(desc['host']) == len(desc['mode']) == len(desc['kernel']) == 1:
|
||||
title = '%s %s %s' % (desc['host'][0], desc['kernel'][0], desc['mode'][0])
|
||||
if target:
|
||||
title += ' %s' % target
|
||||
else:
|
||||
title = inpath
|
||||
createHTMLSummarySimple(testruns, os.path.join(outpath, 'summary.html'), title)
|
||||
|