Merge tag 'arm-smmu-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu

Arm SMMU updates for 5.15

- SMMUv3

  * Minor optimisation to avoid zeroing struct members on CMD submission

  * Increased use of batched commands to reduce submission latency (see the sketch after this list)

  * Refactoring in preparation for ECMDQ support

- SMMUv2

  * Fix races when probing devices with identical StreamIDs

  * Optimise walk cache flushing for Qualcomm implementations

  * Allow deep sleep states for some Qualcomm SoCs with shared clocks
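
As a rough illustration of the batching point above (a sketch only, not driver code): the helpers visible in the arm-smmu-v3.c hunks below (arm_smmu_cmdq_batch_add(), arm_smmu_cmdq_batch_submit() and arm_smmu_cmdq_issue_cmd_with_sync()) let callers queue several commands locally and pay for a single CMD_SYNC per submission, instead of issuing each command and then a separate sync. The toy types and the issue_cmdlist() stub in the sketch are invented for the example and only mirror the shape of the real arm_smmu_cmdq_issue_cmdlist() path.

  #include <stdbool.h>
  #include <stdio.h>

  #define CMDQ_BATCH_ENTRIES 64    /* illustrative batch size, not the kernel's value */

  struct cmdq_ent { int opcode; unsigned int sid; };

  struct cmdq_batch {
          struct cmdq_ent cmds[CMDQ_BATCH_ENTRIES];
          int num;
  };

  /* Stand-in for the real submission path (arm_smmu_cmdq_issue_cmdlist()):
   * push n commands and optionally append a CMD_SYNC the caller waits on. */
  static int issue_cmdlist(struct cmdq_ent *cmds, int n, bool sync)
  {
          printf("submitted %d command(s)%s\n", n, sync ? " + CMD_SYNC" : "");
          return 0;
  }

  /* Shape of arm_smmu_cmdq_issue_cmd_with_sync(): command + sync in one submission. */
  static int issue_cmd_with_sync(struct cmdq_ent *ent)
  {
          return issue_cmdlist(ent, 1, true);
  }

  /* Shape of arm_smmu_cmdq_batch_add(): stash locally, flush (without sync) when full. */
  static void batch_add(struct cmdq_batch *batch, struct cmdq_ent *ent)
  {
          if (batch->num == CMDQ_BATCH_ENTRIES) {
                  issue_cmdlist(batch->cmds, batch->num, false);
                  batch->num = 0;
          }
          batch->cmds[batch->num++] = *ent;
  }

  /* Shape of arm_smmu_cmdq_batch_submit(): flush the remainder with a single sync. */
  static int batch_submit(struct cmdq_batch *batch)
  {
          return issue_cmdlist(batch->cmds, batch->num, true);
  }

  int main(void)
  {
          struct cmdq_batch batch = { .num = 0 };
          struct cmdq_ent inv = { .opcode = 1 };  /* arbitrary opcode for the demo */
          unsigned int sid;

          /* e.g. per-StreamID ATC invalidation: batch everything, sync once. */
          for (sid = 0; sid < 4; sid++) {
                  inv.sid = sid;
                  batch_add(&batch, &inv);
          }
          batch_submit(&batch);

          /* Single-command case, as in arm_smmu_tlb_inv_asid() after this series. */
          return issue_cmd_with_sync(&inv);
  }

The latency win comes from touching the shared command queue once per batch: several invalidations plus one sync go out as a single submission, rather than one submission per command followed by a standalone sync.
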
commit af1d321c2e (Joerg Roedel, 2021-08-18 13:30:35 +02:00)
5 changed files with 108 additions and 44 deletions

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

@@ -335,10 +335,14 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
return 0;
}
static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
u32 prod)
static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
{
return &smmu->cmdq;
}
static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
struct arm_smmu_queue *q, u32 prod)
{
struct arm_smmu_queue *q = &smmu->cmdq.q;
struct arm_smmu_cmdq_ent ent = {
.opcode = CMDQ_OP_CMD_SYNC,
};
@@ -355,7 +359,8 @@ static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
arm_smmu_cmdq_build_cmd(cmd, &ent);
}
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
struct arm_smmu_queue *q)
{
static const char * const cerror_str[] = {
[CMDQ_ERR_CERROR_NONE_IDX] = "No error",
@@ -366,7 +371,6 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
int i;
u64 cmd[CMDQ_ENT_DWORDS];
struct arm_smmu_queue *q = &smmu->cmdq.q;
u32 cons = readl_relaxed(q->cons_reg);
u32 idx = FIELD_GET(CMDQ_CONS_ERR, cons);
struct arm_smmu_cmdq_ent cmd_sync = {
@@ -413,6 +417,11 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
__arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q);
}
/*
* Command queue locking.
* This is a form of bastardised rwlock with the following major changes:
@@ -579,7 +588,7 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
{
unsigned long flags;
struct arm_smmu_queue_poll qp;
struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
int ret = 0;
/*
@@ -595,7 +604,7 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
queue_poll_init(smmu, &qp);
do {
llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
llq->val = READ_ONCE(cmdq->q.llq.val);
if (!queue_full(llq))
break;
@@ -614,7 +623,7 @@ static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
{
int ret = 0;
struct arm_smmu_queue_poll qp;
struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
queue_poll_init(smmu, &qp);
@@ -637,12 +646,12 @@ static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
struct arm_smmu_ll_queue *llq)
{
struct arm_smmu_queue_poll qp;
struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
u32 prod = llq->prod;
int ret = 0;
queue_poll_init(smmu, &qp);
llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
llq->val = READ_ONCE(cmdq->q.llq.val);
do {
if (queue_consumed(llq, prod))
break;
@@ -732,12 +741,12 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
u32 prod;
unsigned long flags;
bool owner;
struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
struct arm_smmu_ll_queue llq = {
.max_n_shift = cmdq->q.llq.max_n_shift,
}, head = llq;
struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
struct arm_smmu_ll_queue llq, head;
int ret = 0;
llq.max_n_shift = cmdq->q.llq.max_n_shift;
/* 1. Allocate some space in the queue */
local_irq_save(flags);
llq.val = READ_ONCE(cmdq->q.llq.val);
@@ -772,7 +781,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
if (sync) {
prod = queue_inc_prod_n(&llq, n);
arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod);
arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod);
queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
/*
@@ -845,8 +854,9 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
return ret;
}
static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq_ent *ent)
static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq_ent *ent,
bool sync)
{
u64 cmd[CMDQ_ENT_DWORDS];
@@ -856,12 +866,19 @@ static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
return -EINVAL;
}
return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false);
return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync);
}
static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq_ent *ent)
{
return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
return __arm_smmu_cmdq_issue_cmd(smmu, ent, false);
}
static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq_ent *ent)
{
return __arm_smmu_cmdq_issue_cmd(smmu, ent, true);
}
static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
@@ -929,8 +946,7 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
.tlbi.asid = asid,
};
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
arm_smmu_cmdq_issue_sync(smmu);
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
@@ -939,7 +955,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
size_t i;
unsigned long flags;
struct arm_smmu_master *master;
struct arm_smmu_cmdq_batch cmds = {};
struct arm_smmu_cmdq_batch cmds;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_CFGI_CD,
@@ -949,6 +965,8 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
},
};
cmds.num = 0;
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
for (i = 0; i < master->num_streams; i++) {
@@ -1211,8 +1229,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
},
};
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
arm_smmu_cmdq_issue_sync(smmu);
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
@@ -1747,15 +1764,16 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
{
int i;
struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_cmdq_batch cmds = {};
arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
for (i = 0; i < master->num_streams; i++) {
cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
}
return arm_smmu_cmdq_issue_sync(master->smmu);
return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
}
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
@@ -1765,7 +1783,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
unsigned long flags;
struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_master *master;
struct arm_smmu_cmdq_batch cmds = {};
struct arm_smmu_cmdq_batch cmds;
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
return 0;
@@ -1789,6 +1807,8 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
cmds.num = 0;
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
if (!master->ats_enabled)
@@ -1823,8 +1843,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
} else {
cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
arm_smmu_cmdq_issue_sync(smmu);
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
}
@@ -1837,7 +1856,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
struct arm_smmu_device *smmu = smmu_domain->smmu;
unsigned long end = iova + size, num_pages = 0, tg = 0;
size_t inv_range = granule;
struct arm_smmu_cmdq_batch cmds = {};
struct arm_smmu_cmdq_batch cmds;
if (!size)
return;
@@ -1855,6 +1874,8 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
num_pages = size >> tg;
}
cmds.num = 0;
while (iova < end) {
if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
/*
@@ -3338,18 +3359,16 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Invalidate any cached configuration */
cmd.opcode = CMDQ_OP_CFGI_ALL;
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
arm_smmu_cmdq_issue_sync(smmu);
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
/* Invalidate any stale TLB entries */
if (smmu->features & ARM_SMMU_FEAT_HYP) {
cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
arm_smmu_cmdq_issue_sync(smmu);
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
/* Event queue */
writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);

drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c

@@ -193,6 +193,8 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
{
struct adreno_smmu_priv *priv;
smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
/* Only enable split pagetables for the GPU device (SID 0) */
if (!qcom_adreno_smmu_is_gpu_device(dev))
return 0;
@@ -235,6 +237,14 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ }
};
static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
return 0;
}
static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
@@ -358,6 +368,7 @@ static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
}
static const struct arm_smmu_impl qcom_smmu_impl = {
.init_context = qcom_smmu_init_context,
.cfg_probe = qcom_smmu_cfg_probe,
.def_domain_type = qcom_smmu_def_domain_type,
.reset = qcom_smmu500_reset,

drivers/iommu/arm/arm-smmu/arm-smmu.c

@@ -327,9 +327,16 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
ARM_SMMU_CB_S1_TLBIVA);
arm_smmu_tlb_sync_context(cookie);
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
if (cfg->flush_walk_prefer_tlbiasid) {
arm_smmu_tlb_inv_context_s1(cookie);
} else {
arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
ARM_SMMU_CB_S1_TLBIVA);
arm_smmu_tlb_sync_context(cookie);
}
}
static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
@@ -1478,6 +1485,7 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
struct iommu_group *group = NULL;
int i, idx;
mutex_lock(&smmu->stream_map_mutex);
for_each_cfg_sme(cfg, fwspec, i, idx) {
if (group && smmu->s2crs[idx].group &&
group != smmu->s2crs[idx].group)
@@ -1486,8 +1494,10 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
group = smmu->s2crs[idx].group;
}
if (group)
if (group) {
mutex_unlock(&smmu->stream_map_mutex);
return iommu_group_ref_get(group);
}
if (dev_is_pci(dev))
group = pci_device_group(dev);
@@ -1501,6 +1511,7 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
for_each_cfg_sme(cfg, fwspec, i, idx)
smmu->s2crs[idx].group = group;
mutex_unlock(&smmu->stream_map_mutex);
return group;
}
@@ -2281,18 +2292,38 @@ static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{
int ret;
struct arm_smmu_device *smmu = dev_get_drvdata(dev);
ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
if (ret)
return ret;
if (pm_runtime_suspended(dev))
return 0;
return arm_smmu_runtime_resume(dev);
ret = arm_smmu_runtime_resume(dev);
if (ret)
clk_bulk_unprepare(smmu->num_clks, smmu->clks);
return ret;
}
static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
{
if (pm_runtime_suspended(dev))
return 0;
int ret = 0;
struct arm_smmu_device *smmu = dev_get_drvdata(dev);
return arm_smmu_runtime_suspend(dev);
if (pm_runtime_suspended(dev))
goto clk_unprepare;
ret = arm_smmu_runtime_suspend(dev);
if (ret)
return ret;
clk_unprepare:
clk_bulk_unprepare(smmu->num_clks, smmu->clks);
return ret;
}
static const struct dev_pm_ops arm_smmu_pm_ops = {

drivers/iommu/arm/arm-smmu/arm-smmu.h

@@ -346,6 +346,7 @@ struct arm_smmu_cfg {
};
enum arm_smmu_cbar_type cbar;
enum arm_smmu_context_fmt fmt;
bool flush_walk_prefer_tlbiasid;
};
#define ARM_SMMU_INVALID_IRPTNDX 0xff

drivers/iommu/iommu.c

@@ -273,7 +273,9 @@ int iommu_probe_device(struct device *dev)
* support default domains, so the return value is not yet
* checked.
*/
mutex_lock(&group->mutex);
iommu_alloc_default_domain(group, dev);
mutex_unlock(&group->mutex);
if (group->default_domain) {
ret = __iommu_attach_device(group->default_domain, dev);