Merge branch 'icc-rpm' into icc-next

This patch set adds bucket support to the icc-rpm driver, implementing
a mechanism similar to the one already used in the icc-rpmh driver.

It uses the interconnect path tag to indicate which buckets a bandwidth
vote applies to. There are three buckets: AMC, WAKE and SLEEP. The WAKE
and SLEEP bucket values are used to set the corresponding clocks (the
active and sleep bus clocks). For now the AMC bucket is kept but not
actually used.
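
As a hedged illustration (not part of this series; the device, path name
and bandwidth numbers below are made up), a consumer that wants its votes
to count only towards the WAKE bucket can tag its path before voting:

  #include <linux/device.h>
  #include <linux/err.h>
  #include <linux/interconnect.h>
  #include <dt-bindings/interconnect/qcom,icc.h>

  static int example_request_bw(struct device *dev)
  {
          struct icc_path *path;

          /* "cpu-mem" is a hypothetical interconnect path name */
          path = devm_of_icc_get(dev, "cpu-mem");
          if (IS_ERR(path))
                  return PTR_ERR(path);

          /* Route the votes on this path into the WAKE bucket */
          icc_set_tag(path, QCOM_ICC_TAG_WAKE);

          /* Average and peak bandwidth, in kBps */
          return icc_set_bw(path, 800000, 1600000);
  }

Untagged paths keep working: their votes default to QCOM_ICC_TAG_ALWAYS
and are aggregated into every bucket.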

Link: https://lore.kernel.org/r/20220712015929.2789881-1-leo.yan@linaro.org
Signed-off-by: Georgi Djakov <djakov@kernel.org>
Georgi Djakov 2022-07-15 17:56:31 +03:00
commit 009c963eef
9 changed files with 207 additions and 55 deletions

Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml

@@ -45,7 +45,11 @@ properties:
- qcom,sdm660-snoc
'#interconnect-cells':
const: 1
description: |
Value: <1> means the interconnect specifier has a single cell (the
interconnect node id); <2> means it carries the node id plus an
extra path tag.
enum: [ 1, 2 ]
clocks:
minItems: 2

drivers/interconnect/qcom/Makefile

@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INTERCONNECT_QCOM) += interconnect_qcom.o
interconnect_qcom-y := icc-common.o
icc-bcm-voter-objs := bcm-voter.o
qnoc-msm8916-objs := msm8916.o
qnoc-msm8939-objs := msm8939.o

drivers/interconnect/qcom/icc-common.c

@@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Linaro Ltd.
*/
#include <linux/of.h>
#include <linux/slab.h>
#include "icc-common.h"
struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data)
{
struct icc_node_data *ndata;
struct icc_node *node;
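/* Resolve the first specifier cell (the node id) via the standard one-cell xlate */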
node = of_icc_xlate_onecell(spec, data);
if (IS_ERR(node))
return ERR_CAST(node);
ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
if (!ndata)
return ERR_PTR(-ENOMEM);
ndata->node = node;
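/* An optional second specifier cell carries the path tag (bucket selection) */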
if (spec->args_count == 2)
ndata->tag = spec->args[1];
if (spec->args_count > 2)
pr_warn("%pOF: Too many arguments, path tag is not parsed\n", spec->np);
return ndata;
}
EXPORT_SYMBOL_GPL(qcom_icc_xlate_extended);

drivers/interconnect/qcom/icc-common.h

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2022 Linaro Ltd.
*/
#ifndef __DRIVERS_INTERCONNECT_QCOM_ICC_COMMON_H__
#define __DRIVERS_INTERCONNECT_QCOM_ICC_COMMON_H__
#include <linux/interconnect-provider.h>
struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data);
#endif

drivers/interconnect/qcom/icc-rpm.c

@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include "smd-rpm.h"
#include "icc-common.h"
#include "icc-rpm.h"
/* QNOC QoS */
@@ -233,29 +234,10 @@ static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
return ret;
}
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
static int __qcom_icc_set(struct icc_node *n, struct qcom_icc_node *qn,
u64 sum_bw)
{
struct qcom_icc_provider *qp;
struct qcom_icc_node *qn;
struct icc_provider *provider;
struct icc_node *n;
u64 sum_bw;
u64 max_peak_bw;
u64 rate;
u32 agg_avg = 0;
u32 agg_peak = 0;
int ret, i;
qn = src->data;
provider = src->provider;
qp = to_qcom_provider(provider);
list_for_each_entry(n, &provider->nodes, node_list)
provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
&agg_avg, &agg_peak);
sum_bw = icc_units_to_bps(agg_avg);
max_peak_bw = icc_units_to_bps(agg_peak);
int ret;
if (!qn->qos.ap_owned) {
/* send bandwidth request message to the RPM processor */
@@ -264,17 +246,150 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
return ret;
} else if (qn->qos.qos_mode != -1) {
/* set bandwidth directly from the AP */
ret = qcom_icc_qos_set(src, sum_bw);
ret = qcom_icc_qos_set(n, sum_bw);
if (ret)
return ret;
}
rate = max(sum_bw, max_peak_bw);
return 0;
}
do_div(rate, qn->buswidth);
rate = min_t(u64, rate, LONG_MAX);
/**
* qcom_icc_pre_bw_aggregate - cleans up values before re-aggregate requests
* @node: icc node to operate on
*/
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
struct qcom_icc_node *qn;
size_t i;
qn = node->data;
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
qn->sum_avg[i] = 0;
qn->max_peak[i] = 0;
}
}
/**
* qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag
* @node: node to aggregate
* @tag: tag to indicate which buckets to aggregate
* @avg_bw: new bw to sum aggregate
* @peak_bw: new bw to max aggregate
* @agg_avg: existing aggregate avg bw val
* @agg_peak: existing aggregate peak bw val
*/
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
size_t i;
struct qcom_icc_node *qn;
qn = node->data;
if (!tag)
tag = QCOM_ICC_TAG_ALWAYS;
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
if (tag & BIT(i)) {
qn->sum_avg[i] += avg_bw;
qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
}
}
*agg_avg += avg_bw;
*agg_peak = max_t(u32, *agg_peak, peak_bw);
return 0;
}
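/*
* Hedged worked example (not part of this commit; the numbers are
* invented): two votes on the same node, one tagged WAKE and one
* untagged (untagged defaults to QCOM_ICC_TAG_ALWAYS, i.e. every
* bucket):
*
*   vote A: tag = QCOM_ICC_TAG_WAKE, avg = 100, peak = 200
*   vote B: tag = 0,                 avg =  50, peak =  80
*
* After aggregation the node holds:
*   sum_avg[AMC]   =  50    max_peak[AMC]   =  80
*   sum_avg[WAKE]  = 150    max_peak[WAKE]  = 200
*   sum_avg[SLEEP] =  50    max_peak[SLEEP] =  80
*/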
/**
* qcom_icc_bus_aggregate - aggregate bandwidth by traversing all nodes
* @provider: generic interconnect provider
* @agg_avg: an array for aggregated average bandwidth of buckets
* @agg_peak: an array for aggregated peak bandwidth of buckets
* @max_agg_avg: pointer to max value of aggregated average bandwidth
*/
static void qcom_icc_bus_aggregate(struct icc_provider *provider,
u64 *agg_avg, u64 *agg_peak,
u64 *max_agg_avg)
{
struct icc_node *node;
struct qcom_icc_node *qn;
int i;
/* Initialise aggregate values */
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
agg_avg[i] = 0;
agg_peak[i] = 0;
}
*max_agg_avg = 0;
/*
* Iterate nodes on the interconnect and aggregate bandwidth
* requests for every bucket.
*/
list_for_each_entry(node, &provider->nodes, node_list) {
qn = node->data;
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
agg_avg[i] += qn->sum_avg[i];
agg_peak[i] = max_t(u64, agg_peak[i], qn->max_peak[i]);
}
}
/* Find maximum values across all buckets */
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++)
*max_agg_avg = max_t(u64, *max_agg_avg, agg_avg[i]);
}
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_icc_provider *qp;
struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
struct icc_provider *provider;
u64 sum_bw;
u64 rate;
u64 agg_avg[QCOM_ICC_NUM_BUCKETS], agg_peak[QCOM_ICC_NUM_BUCKETS];
u64 max_agg_avg;
int ret, i;
int bucket;
src_qn = src->data;
if (dst)
dst_qn = dst->data;
provider = src->provider;
qp = to_qcom_provider(provider);
qcom_icc_bus_aggregate(provider, agg_avg, agg_peak, &max_agg_avg);
sum_bw = icc_units_to_bps(max_agg_avg);
ret = __qcom_icc_set(src, src_qn, sum_bw);
if (ret)
return ret;
if (dst_qn) {
ret = __qcom_icc_set(dst, dst_qn, sum_bw);
if (ret)
return ret;
}
for (i = 0; i < qp->num_clks; i++) {
/*
* Use the WAKE bucket for the active clock ("bus_a") and the SLEEP
* bucket for the other clocks.  If a platform doesn't set interconnect
* path tags, untagged votes land in every bucket, so both clocks end
* up with the same rate.
*
* Note: the AMC bucket is not supported yet.
*/
if (!strcmp(qp->bus_clks[i].id, "bus_a"))
bucket = QCOM_ICC_BUCKET_WAKE;
else
bucket = QCOM_ICC_BUCKET_SLEEP;
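/* Turn the larger of the bucket's avg/peak bandwidth into a clock rate, scaled by the node's bus width */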
rate = icc_units_to_bps(max(agg_avg[bucket], agg_peak[bucket]));
do_div(rate, src_qn->buswidth);
rate = min_t(u64, rate, LONG_MAX);
if (qp->bus_clk_rate[i] == rate)
continue;
@@ -394,8 +509,9 @@ regmap_done:
INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
provider->set = qcom_icc_set;
provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
provider->aggregate = qcom_icc_bw_aggregate;
provider->xlate_extended = qcom_icc_xlate_extended;
provider->data = data;
ret = icc_provider_add(provider);

drivers/interconnect/qcom/icc-rpm.h

@@ -6,6 +6,8 @@
#ifndef __DRIVERS_INTERCONNECT_QCOM_ICC_RPM_H
#define __DRIVERS_INTERCONNECT_QCOM_ICC_RPM_H
#include <dt-bindings/interconnect/qcom,icc.h>
#define RPM_BUS_MASTER_REQ 0x73616d62
#define RPM_BUS_SLAVE_REQ 0x766c7362
@@ -65,6 +67,8 @@ struct qcom_icc_qos {
* @links: an array of nodes where we can go next while traversing
* @num_links: the total number of @links
* @buswidth: width of the interconnect between a node and the bus (bytes)
* @sum_avg: current sum aggregate value of all avg bw requests
* @max_peak: current max aggregate value of all peak bw requests
* @mas_rpm_id: RPM id for devices that are bus masters
* @slv_rpm_id: RPM id for devices that are bus slaves
* @qos: NoC QoS setting parameters
@@ -75,6 +79,8 @@ struct qcom_icc_node {
const u16 *links;
u16 num_links;
u16 buswidth;
u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
u64 max_peak[QCOM_ICC_NUM_BUCKETS];
int mas_rpm_id;
int slv_rpm_id;
struct qcom_icc_qos qos;

drivers/interconnect/qcom/icc-rpmh.c

@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
/**
@@ -100,31 +101,6 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
}
EXPORT_SYMBOL_GPL(qcom_icc_set);
struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data)
{
struct icc_node_data *ndata;
struct icc_node *node;
node = of_icc_xlate_onecell(spec, data);
if (IS_ERR(node))
return ERR_CAST(node);
ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
if (!ndata)
return ERR_PTR(-ENOMEM);
ndata->node = node;
if (spec->args_count == 2)
ndata->tag = spec->args[1];
if (spec->args_count > 2)
pr_warn("%pOF: Too many arguments, path tag is not parsed\n", spec->np);
return ndata;
}
EXPORT_SYMBOL_GPL(qcom_icc_xlate_extended);
/**
* qcom_icc_bcm_init - populates bcm aux data and connect qnodes
* @bcm: bcm to be initialized

drivers/interconnect/qcom/icc-rpmh.h

@@ -131,7 +131,6 @@ struct qcom_icc_desc {
int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
int qcom_icc_set(struct icc_node *src, struct icc_node *dst);
struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data);
int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev);
void qcom_icc_pre_aggregate(struct icc_node *node);
int qcom_icc_rpmh_probe(struct platform_device *pdev);

drivers/interconnect/qcom/sm8450.c

@@ -12,6 +12,7 @@
#include <dt-bindings/interconnect/qcom,sm8450.h>
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
#include "sm8450.h"