commit a7d6ba14ef
The code was reorganized in 2012 by commit 0c01ebbfd3.
The main change is a loop over the trip points array and an
unconditional call to the governors' throttle() ops for each of them,
even if the trip temperature has not been reached yet.
With this change, 'forced_passive' is no longer checked in the
thermal_zone_device_update() function but in the step_wise governor's
throttle() callback.
As 'forced_passive' does not belong to the trip point array,
thermal_zone_device_update() never compares the zone temperature with
the specified passive temperature and thus does not detect that the
passive limit has been crossed. Consequently, throttle() is never
called for that limit and the 'forced_passive' branch is unreachable.
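For illustration, the update path after that reorganization has roughly
the following shape (a deliberately simplified sketch, not the actual
thermal_core.c code; critical trip handling, locking and the
default-governor fallback are omitted):

    void thermal_zone_device_update(struct thermal_zone_device *tz)
    {
            int trip;

            update_temperature(tz);

            /* every trip point in the array is handed to the governor */
            for (trip = 0; trip < tz->trips; trip++)
                    tz->governor->throttle(tz, trip);

            /*
             * 'forced_passive' is not one of these trip points, so no
             * throttle() call is ever made for that limit and the
             * corresponding branch in the step_wise governor never runs.
             */
    }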
In addition, the default processor cooling device is not automatically
bound to the thermal zone if there is no passive trip point, so
'forced_passive' cannot operate.
If there is an active trip point, the throttle function will be called
to mitigate at that temperature and 'forced_passive' will override the
mitigation of the active trip point, but it then acts through the
default cooling device bound to the thermal zone, usually a fan, which
is not a passive cooling effect.
Given that the regression has existed for more than 8 years, that
nobody has complained and that, to the best of my knowledge, there is
no open bug in https://bugzilla.kernel.org, it is reasonable to say
this feature is unused.
Remove the 'forced_passive' related code.
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Reviewed-by: Thara Gopinath <thara.gopinath@linaro.org>
Link: https://lore.kernel.org/r/20201214233811.485669-1-daniel.lezcano@linaro.org
step_wise.c (202 lines, 5.7 KiB, C)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * step_wise.c - A step-by-step Thermal throttling governor
 *
 * Copyright (C) 2012 Intel Corp
 * Copyright (C) 2012 Durgadoss R <durgadoss.r@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/thermal.h>
#include <trace/events/thermal.h>

#include "thermal_core.h"

/*
 * If the temperature is higher than a trip point,
 *    a. if the trend is THERMAL_TREND_RAISING, use higher cooling
 *       state for this trip point
 *    b. if the trend is THERMAL_TREND_DROPPING, do nothing
 *    c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
 *       for this trip point
 *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
 *       for this trip point
 * If the temperature is lower than a trip point,
 *    a. if the trend is THERMAL_TREND_RAISING, do nothing
 *    b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
 *       state for this trip point, if the cooling state already
 *       equals lower limit, deactivate the thermal instance
 *    c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing
 *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit,
 *       if the cooling state already equals lower limit,
 *       deactivate the thermal instance
 */
static unsigned long get_target_state(struct thermal_instance *instance,
				enum thermal_trend trend, bool throttle)
{
	struct thermal_cooling_device *cdev = instance->cdev;
	unsigned long cur_state;
	unsigned long next_target;

	/*
	 * We keep this instance the way it is by default.
	 * Otherwise, we use the current state of the
	 * cdev in use to determine the next_target.
	 */
	cdev->ops->get_cur_state(cdev, &cur_state);
	next_target = instance->target;
	dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);

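	/*
	 * First evaluation of this thermal instance: when throttling, start
	 * from cur_state + 1 clamped into [lower, upper]; otherwise request
	 * no target at all (THERMAL_NO_TARGET).
	 */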
	if (!instance->initialized) {
		if (throttle) {
			next_target = (cur_state + 1) >= instance->upper ?
					instance->upper :
					((cur_state + 1) < instance->lower ?
					instance->lower : (cur_state + 1));
		} else {
			next_target = THERMAL_NO_TARGET;
		}

		return next_target;
	}

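	/*
	 * Steady state: follow the zone's temperature trend, stepping the
	 * cooling state up or down by one, or jumping straight to a limit,
	 * as described in the comment at the top of this file.
	 */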
	switch (trend) {
	case THERMAL_TREND_RAISING:
		if (throttle) {
			next_target = cur_state < instance->upper ?
				    (cur_state + 1) : instance->upper;
			if (next_target < instance->lower)
				next_target = instance->lower;
		}
		break;
	case THERMAL_TREND_RAISE_FULL:
		if (throttle)
			next_target = instance->upper;
		break;
	case THERMAL_TREND_DROPPING:
		if (cur_state <= instance->lower) {
			if (!throttle)
				next_target = THERMAL_NO_TARGET;
		} else {
			if (!throttle) {
				next_target = cur_state - 1;
				if (next_target > instance->upper)
					next_target = instance->upper;
			}
		}
		break;
	case THERMAL_TREND_DROP_FULL:
		if (cur_state == instance->lower) {
			if (!throttle)
				next_target = THERMAL_NO_TARGET;
		} else
			next_target = instance->lower;
		break;
	default:
		break;
	}

	return next_target;
}

static void update_passive_instance(struct thermal_zone_device *tz,
				enum thermal_trip_type type, int value)
{
	/*
	 * If value is +1, activate a passive instance.
	 * If value is -1, deactivate a passive instance.
	 */
	if (type == THERMAL_TRIP_PASSIVE)
		tz->passive += value;
}

static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
{
	int trip_temp;
	enum thermal_trip_type trip_type;
	enum thermal_trend trend;
	struct thermal_instance *instance;
	bool throttle = false;
	int old_target;

	tz->ops->get_trip_temp(tz, trip, &trip_temp);
	tz->ops->get_trip_type(tz, trip, &trip_type);

	trend = get_tz_trend(tz, trip);

	if (tz->temperature >= trip_temp) {
		throttle = true;
		trace_thermal_zone_trip(tz, trip, trip_type);
	}

	dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
				trip, trip_type, trip_temp, trend, throttle);

	mutex_lock(&tz->lock);

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if (instance->trip != trip)
			continue;

		old_target = instance->target;
		instance->target = get_target_state(instance, trend, throttle);
		dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
					old_target, (int)instance->target);

		if (instance->initialized && old_target == instance->target)
			continue;

		/* Activate a passive thermal instance */
		if (old_target == THERMAL_NO_TARGET &&
			instance->target != THERMAL_NO_TARGET)
			update_passive_instance(tz, trip_type, 1);
		/* Deactivate a passive thermal instance */
		else if (old_target != THERMAL_NO_TARGET &&
			instance->target == THERMAL_NO_TARGET)
			update_passive_instance(tz, trip_type, -1);

		instance->initialized = true;
		mutex_lock(&instance->cdev->lock);
		instance->cdev->updated = false; /* cdev needs update */
		mutex_unlock(&instance->cdev->lock);
	}

	mutex_unlock(&tz->lock);
}

/**
 * step_wise_throttle - throttles devices associated with the given zone
 * @tz: thermal_zone_device
 * @trip: trip point index
 *
 * Throttling Logic: This uses the trend of the thermal zone to throttle.
 * If the thermal zone is 'heating up' this throttles all the cooling
 * devices associated with the zone and its particular trip point, by one
 * step. If the zone is 'cooling down' it brings back the performance of
 * the devices by one step.
 */
static int step_wise_throttle(struct thermal_zone_device *tz, int trip)
{
	struct thermal_instance *instance;

	thermal_zone_trip_update(tz, trip);

	mutex_lock(&tz->lock);

	list_for_each_entry(instance, &tz->thermal_instances, tz_node)
		thermal_cdev_update(instance->cdev);

	mutex_unlock(&tz->lock);

	return 0;
}

static struct thermal_governor thermal_gov_step_wise = {
	.name		= "step_wise",
	.throttle	= step_wise_throttle,
};

THERMAL_GOVERNOR_DECLARE(thermal_gov_step_wise);