Merge branch 'ipa-runtime-pm'

Alex Elder says:

====================
net: ipa: more work toward runtime PM

The first two patches in this series are basically bug fixes, but in
practice I don't think we've seen the problems they might cause.

The third patch moves the clock- and interconnect-related error messages
around a bit, reporting better information and doing so in the functions
where the clock and interconnects are enabled or disabled (rather than in
those functions' callers); a small sketch of the pattern appears just below.

The last three patches move power-related code into "ipa_clock.c", as a
step toward generalizing the purpose of that source file; a sketch of the
resulting power-management wiring follows the sign-off.
====================
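
Just below, as editorial illustration (not part of the series), is a minimal
sketch of the error-reporting pattern the third patch adopts: the enable loop
reports which interconnect failed, via icc_get_name(), and unwinds on the
spot instead of leaving a generic message to its caller. The struct and
function names are invented for the sketch; icc_set_bw(), icc_get_name() and
dev_err() are the real kernel APIs the patch uses.

#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/types.h>

struct example_interconnect {
	struct icc_path *path;
	u32 average_bandwidth;
	u32 peak_bandwidth;
};

/* Enable a set of interconnects; on failure, report the specific path
 * right here (rather than in the caller) and unwind what was enabled.
 */
static int example_interconnect_enable(struct device *dev,
				       struct example_interconnect *ic,
				       u32 count)
{
	u32 i;
	int ret;

	for (i = 0; i < count; i++) {
		ret = icc_set_bw(ic[i].path, ic[i].average_bandwidth,
				 ic[i].peak_bandwidth);
		if (ret) {
			dev_err(dev, "error %d enabling %s interconnect\n",
				ret, icc_get_name(ic[i].path));
			while (i--)	/* unwind the ones already enabled */
				(void)icc_set_bw(ic[i].path, 0, 0);
			return ret;
		}
	}

	return 0;
}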

Signed-off-by: David S. Miller <davem@davemloft.net>
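
With the last three patches applied, the system power management callbacks
live in "ipa_clock.c" and are exported as ipa_pm_ops through "ipa_clock.h"
(the new extern declaration is visible in the diff below); the platform
driver then only needs to point its .pm field at them. The following is a
hedged sketch of that wiring, not a copy of the driver's actual
platform_driver definition, and every other field is omitted.

/* Sketch only: a platform driver picking up the PM ops now defined in
 * ipa_clock.c and declared in ipa_clock.h.
 */
#include <linux/platform_device.h>

#include "ipa_clock.h"	/* provides: extern const struct dev_pm_ops ipa_pm_ops; */

static struct platform_driver example_ipa_driver = {
	.driver	= {
		.name	= "ipa",
		.pm	= &ipa_pm_ops,	/* ipa_suspend()/ipa_resume() live in ipa_clock.c */
	},
};
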
commit 839454801e
Author: David S. Miller
Date:   2021-08-05 11:27:05 +01:00
5 changed files with 171 additions and 128 deletions

drivers/net/ipa/ipa.h

@ -27,20 +27,9 @@ struct ipa_clock;
struct ipa_smp2p;
struct ipa_interrupt;
/**
* enum ipa_flag - IPA state flags
* @IPA_FLAG_RESUMED: Whether resume from suspend has been signaled
* @IPA_FLAG_COUNT: Number of defined IPA flags
*/
enum ipa_flag {
IPA_FLAG_RESUMED,
IPA_FLAG_COUNT, /* Last; not a flag */
};
/**
* struct ipa - IPA information
* @gsi: Embedded GSI structure
* @flags: Boolean state flags
* @version: IPA hardware version
* @pdev: Platform device
* @completion: Used to signal pipeline clear transfer complete
@ -83,7 +72,6 @@ enum ipa_flag {
*/
struct ipa {
struct gsi gsi;
DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
enum ipa_version version;
struct platform_device *pdev;
struct completion completion;

drivers/net/ipa/ipa_clock.c

@ -9,9 +9,12 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/pm.h>
#include <linux/bitops.h>
#include "ipa.h"
#include "ipa_clock.h"
#include "ipa_endpoint.h"
#include "ipa_modem.h"
#include "ipa_data.h"
@ -42,11 +45,22 @@ struct ipa_interconnect {
u32 peak_bandwidth;
};
/**
* enum ipa_power_flag - IPA power flags
* @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
* @IPA_POWER_FLAG_COUNT: Number of defined power flags
*/
enum ipa_power_flag {
IPA_POWER_FLAG_RESUMED,
IPA_POWER_FLAG_COUNT, /* Last; not a flag */
};
/**
* struct ipa_clock - IPA clocking information
* @count: Clocking reference count
* @mutex: Protects clock enable/disable
* @core: IPA core clock
* @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
* @interconnect: Interconnect array
*/
@ -54,6 +68,7 @@ struct ipa_clock {
refcount_t count;
struct mutex mutex; /* protects clock enable/disable */
struct clk *core;
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
struct ipa_interconnect *interconnect;
};
@ -144,8 +159,12 @@ static int ipa_interconnect_enable(struct ipa *ipa)
ret = icc_set_bw(interconnect->path,
interconnect->average_bandwidth,
interconnect->peak_bandwidth);
if (ret)
if (ret) {
dev_err(&ipa->pdev->dev,
"error %d enabling %s interconnect\n",
ret, icc_get_name(interconnect->path));
goto out_unwind;
}
interconnect++;
}
@ -159,10 +178,11 @@ out_unwind:
}
/* To disable an interconnect, we just set its bandwidth to 0 */
static void ipa_interconnect_disable(struct ipa *ipa)
static int ipa_interconnect_disable(struct ipa *ipa)
{
struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
struct device *dev = &ipa->pdev->dev;
int result = 0;
u32 count;
int ret;
@ -172,13 +192,16 @@ static void ipa_interconnect_disable(struct ipa *ipa)
while (count--) {
interconnect--;
ret = icc_set_bw(interconnect->path, 0, 0);
if (ret && !result)
result = ret;
if (ret) {
dev_err(dev, "error %d disabling %s interconnect\n",
ret, icc_get_name(interconnect->path));
/* Try to disable all; record only the first error */
if (!result)
result = ret;
}
}
if (result)
dev_err(&ipa->pdev->dev,
"error %d disabling IPA interconnects\n", ret);
return result;
}
/* Turn on IPA clocks, including interconnects */
@ -191,8 +214,10 @@ static int ipa_clock_enable(struct ipa *ipa)
return ret;
ret = clk_prepare_enable(ipa->clock->core);
if (ret)
ipa_interconnect_disable(ipa);
if (ret) {
dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
(void)ipa_interconnect_disable(ipa);
}
return ret;
}
@ -201,7 +226,7 @@ static int ipa_clock_enable(struct ipa *ipa)
static void ipa_clock_disable(struct ipa *ipa)
{
clk_disable_unprepare(ipa->clock->core);
ipa_interconnect_disable(ipa);
(void)ipa_interconnect_disable(ipa);
}
/* Get an IPA clock reference, but only if the reference count is
@ -238,13 +263,8 @@ void ipa_clock_get(struct ipa *ipa)
goto out_mutex_unlock;
ret = ipa_clock_enable(ipa);
if (ret) {
dev_err(&ipa->pdev->dev, "error %d enabling IPA clock\n", ret);
goto out_mutex_unlock;
}
refcount_set(&clock->count, 1);
if (!ret)
refcount_set(&clock->count, 1);
out_mutex_unlock:
mutex_unlock(&clock->mutex);
}
@ -271,6 +291,40 @@ u32 ipa_clock_rate(struct ipa *ipa)
return ipa->clock ? (u32)clk_get_rate(ipa->clock->core) : 0;
}
/**
* ipa_suspend_handler() - Handle the suspend IPA interrupt
* @ipa: IPA pointer
* @irq_id: IPA interrupt type (unused)
*
* If an RX endpoint is suspended, and the IPA has a packet destined for
* that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
* that it should resume the endpoint. If we get one of these interrupts
* we just wake up the system.
*/
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
/* Just report the event, and let system resume handle the rest.
* More than one endpoint could signal this; if so, ignore
* all but the first.
*/
if (!test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags))
pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
/* Acknowledge/clear the suspend interrupt on all endpoints */
ipa_interrupt_suspend_clear_all(ipa->interrupt);
}
void ipa_power_setup(struct ipa *ipa)
{
ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
ipa_suspend_handler);
}
void ipa_power_teardown(struct ipa *ipa)
{
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
}
/* Initialize IPA clocking */
struct ipa_clock *
ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
@ -329,3 +383,62 @@ void ipa_clock_exit(struct ipa_clock *clock)
kfree(clock);
clk_put(clk);
}
/**
* ipa_suspend() - Power management system suspend callback
* @dev: IPA device structure
*
* Return: Always returns zero
*
* Called by the PM framework when a system suspend operation is invoked.
* Suspends endpoints and releases the clock reference held to keep
* the IPA clock running until this point.
*/
static int ipa_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
__clear_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags);
ipa_endpoint_suspend(ipa);
gsi_suspend(&ipa->gsi);
}
ipa_clock_put(ipa);
return 0;
}
/**
* ipa_resume() - Power management system resume callback
* @dev: IPA device structure
*
* Return: Always returns 0
*
* Called by the PM framework when a system resume operation is invoked.
* Takes an IPA clock reference to keep the clock running until suspend,
* and resumes endpoints.
*/
static int ipa_resume(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
/* This clock reference will keep the IPA out of suspend
* until we get a power management suspend request.
*/
ipa_clock_get(ipa);
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
gsi_resume(&ipa->gsi);
ipa_endpoint_resume(ipa);
}
return 0;
}
const struct dev_pm_ops ipa_pm_ops = {
.suspend = ipa_suspend,
.resume = ipa_resume,
};
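
The SUSPEND-interrupt handling moved into this file follows a common one-shot
wakeup pattern: the first interrupt after a system suspend reports a wakeup
event, and the suspend path re-arms the flag. Here is a self-contained sketch
of just that pattern, with invented names, for reference:

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/pm_wakeup.h>
#include <linux/types.h>

enum { EXAMPLE_FLAG_RESUMED, EXAMPLE_FLAG_COUNT };

struct example_power {
	struct device *dev;
	DECLARE_BITMAP(flags, EXAMPLE_FLAG_COUNT);
};

/* Interrupt path: report a wakeup event, but only once per suspend cycle */
static void example_wake_irq(struct example_power *power)
{
	if (!test_and_set_bit(EXAMPLE_FLAG_RESUMED, power->flags))
		pm_wakeup_dev_event(power->dev, 0, true);
}

/* System suspend path: re-arm the one-shot flag for the next cycle */
static void example_suspend(struct example_power *power)
{
	__clear_bit(EXAMPLE_FLAG_RESUMED, power->flags);
}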

drivers/net/ipa/ipa_clock.h

@ -11,6 +11,9 @@ struct device;
struct ipa;
struct ipa_clock_data;
/* IPA device power management function block */
extern const struct dev_pm_ops ipa_pm_ops;
/**
* ipa_clock_rate() - Return the current IPA core clock rate
* @ipa: IPA structure
@ -19,6 +22,18 @@ struct ipa_clock_data;
*/
u32 ipa_clock_rate(struct ipa *ipa);
/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*/
void ipa_power_setup(struct ipa *ipa);
/**
* ipa_power_teardown() - Inverse of ipa_power_setup()
* @ipa: IPA pointer
*/
void ipa_power_teardown(struct ipa *ipa);
/**
* ipa_clock_init() - Initialize IPA clocking
* @dev: IPA device

drivers/net/ipa/ipa_main.c

@ -79,29 +79,6 @@
/* Divider for 19.2 MHz crystal oscillator clock to get common timer clock */
#define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */
/**
* ipa_suspend_handler() - Handle the suspend IPA interrupt
* @ipa: IPA pointer
* @irq_id: IPA interrupt type (unused)
*
* If an RX endpoint is in suspend state, and the IPA has a packet
* destined for that endpoint, the IPA generates a SUSPEND interrupt
* to inform the AP that it should resume the endpoint. If we get
* one of these interrupts we just resume everything.
*/
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
/* Just report the event, and let system resume handle the rest.
* More than one endpoint could signal this; if so, ignore
* all but the first.
*/
if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags))
pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
/* Acknowledge/clear the suspend interrupt on all endpoints */
ipa_interrupt_suspend_clear_all(ipa->interrupt);
}
/**
* ipa_setup() - Set up IPA hardware
* @ipa: IPA pointer
@ -124,12 +101,11 @@ int ipa_setup(struct ipa *ipa)
if (ret)
return ret;
ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
ipa_suspend_handler);
ipa_power_setup(ipa);
ret = device_init_wakeup(dev, true);
if (ret)
goto err_interrupt_remove;
goto err_gsi_teardown;
ipa_endpoint_setup(ipa);
@ -177,9 +153,9 @@ err_command_disable:
ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
ipa_endpoint_teardown(ipa);
ipa_power_teardown(ipa);
(void)device_init_wakeup(dev, false);
err_interrupt_remove:
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
err_gsi_teardown:
gsi_teardown(&ipa->gsi);
return ret;
@ -204,8 +180,8 @@ static void ipa_teardown(struct ipa *ipa)
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa);
ipa_power_teardown(ipa);
(void)device_init_wakeup(&ipa->pdev->dev, false);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
gsi_teardown(&ipa->gsi);
}
@ -474,7 +450,7 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
ret = ipa_endpoint_config(ipa);
if (ret)
goto err_interrupt_deconfig;
goto err_uc_deconfig;
ipa_table_config(ipa); /* No deconfig required */
@ -491,7 +467,7 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
err_endpoint_deconfig:
ipa_endpoint_deconfig(ipa);
err_interrupt_deconfig:
err_uc_deconfig:
ipa_uc_deconfig(ipa);
ipa_interrupt_deconfig(ipa->interrupt);
ipa->interrupt = NULL;
@ -874,65 +850,6 @@ static void ipa_shutdown(struct platform_device *pdev)
dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret);
}
/**
* ipa_suspend() - Power management system suspend callback
* @dev: IPA device structure
*
* Return: Always returns zero
*
* Called by the PM framework when a system suspend operation is invoked.
* Suspends endpoints and releases the clock reference held to keep
* the IPA clock running until this point.
*/
static int ipa_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
__clear_bit(IPA_FLAG_RESUMED, ipa->flags);
ipa_endpoint_suspend(ipa);
gsi_suspend(&ipa->gsi);
}
ipa_clock_put(ipa);
return 0;
}
/**
* ipa_resume() - Power management system resume callback
* @dev: IPA device structure
*
* Return: Always returns 0
*
* Called by the PM framework when a system resume operation is invoked.
* Takes an IPA clock reference to keep the clock running until suspend,
* and resumes endpoints.
*/
static int ipa_resume(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
/* This clock reference will keep the IPA out of suspend
* until we get a power management suspend request.
*/
ipa_clock_get(ipa);
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
gsi_resume(&ipa->gsi);
ipa_endpoint_resume(ipa);
}
return 0;
}
static const struct dev_pm_ops ipa_pm_ops = {
.suspend = ipa_suspend,
.resume = ipa_resume,
};
static const struct attribute_group *ipa_attribute_groups[] = {
&ipa_attribute_group,
&ipa_feature_attribute_group,

drivers/net/ipa/ipa_modem.c

@ -178,6 +178,9 @@ void ipa_modem_suspend(struct net_device *netdev)
struct ipa_priv *priv = netdev_priv(netdev);
struct ipa *ipa = priv->ipa;
if (!(netdev->flags & IFF_UP))
return;
netif_stop_queue(netdev);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
@ -194,6 +197,9 @@ void ipa_modem_resume(struct net_device *netdev)
struct ipa_priv *priv = netdev_priv(netdev);
struct ipa *ipa = priv->ipa;
if (!(netdev->flags & IFF_UP))
return;
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
@ -225,13 +231,15 @@ int ipa_modem_start(struct ipa *ipa)
SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
ipa->modem_netdev = netdev;
ret = register_netdev(netdev);
if (!ret) {
ipa->modem_netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
} else {
if (ret) {
ipa->modem_netdev = NULL;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
free_netdev(netdev);
}
@ -265,13 +273,15 @@ int ipa_modem_stop(struct ipa *ipa)
/* Prevent the modem from triggering a call to ipa_setup() */
ipa_smp2p_disable(ipa);
/* Stop the queue and disable the endpoints if it's open */
/* Clean up the netdev and endpoints if it was started */
if (netdev) {
(void)ipa_stop(netdev);
/* If it was opened, stop it first */
if (netdev->flags & IFF_UP)
(void)ipa_stop(netdev);
unregister_netdev(netdev);
ipa->modem_netdev = NULL;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
ipa->modem_netdev = NULL;
unregister_netdev(netdev);
free_netdev(netdev);
}