Merge tag 'arm-smmu-updates' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into arm/smmu

Arm SMMU updates for 6.9

- Device-tree binding updates for a bunch of Qualcomm SoCs

- SMMUv2:
  * Support for Qualcomm X1E80100 MDSS

- SMMUv3:
  * Significant rework of the driver's STE manipulation and domain
    handling code. This is the initial part of a larger scale rework
    aiming to improve the driver's implementation of the IOMMU API
    in preparation for hooking up IOMMUFD support.
Commit f675692832 by Joerg Roedel, 2024-03-01 13:57:40 +01:00
6 changed files with 533 additions and 285 deletions
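The centrepiece of the SMMUv3 rework summarized above is a generic entry-update algorithm: a per-configuration "used bits" mask describes which STE bits the HW may observe, and an update is hitless when at most one qword's used bits have to change. Below is a small, self-contained C model of that idea. It only mirrors the shape of arm_smmu_get_ste_used() and arm_smmu_entry_qword_diff() from the diff; the four-qword layout, the CFG encoding and every toy_* name are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define NUM_QWORDS 4
#define V_BIT      0x1ULL
#define CFG_MASK   0xeULL	/* invented config field, bits [3:1] */
#define CFG_BYPASS 0x8ULL
#define CFG_TRANS  0xaULL

struct toy_entry {
	uint64_t data[NUM_QWORDS];
};

/* Analogue of arm_smmu_get_ste_used(): report which bits the HW may
 * read for the entry's current configuration. */
static void toy_get_used(const struct toy_entry *e, struct toy_entry *used)
{
	unsigned int i;

	for (i = 0; i < NUM_QWORDS; i++)
		used->data[i] = 0;
	used->data[0] = V_BIT;
	if (!(e->data[0] & V_BIT))
		return;
	used->data[0] |= CFG_MASK;
	if ((e->data[0] & CFG_MASK) == CFG_TRANS) {
		used->data[1] = ~0ULL;	/* table pointer qword */
		used->data[2] = ~0ULL;	/* attribute qword */
	}
}

/* Analogue of arm_smmu_entry_qword_diff(): bit i is set when qword i
 * needs a disruptive change; unused_update collects the bits that can
 * be flipped early because the current configuration ignores them. */
static uint8_t toy_qword_diff(const struct toy_entry *cur,
			      const struct toy_entry *target,
			      struct toy_entry *unused_update)
{
	struct toy_entry cur_used, tgt_used;
	uint8_t diff = 0;
	unsigned int i;

	toy_get_used(cur, &cur_used);
	toy_get_used(target, &tgt_used);
	for (i = 0; i < NUM_QWORDS; i++) {
		unused_update->data[i] =
			(cur->data[i] & cur_used.data[i]) |
			(target->data[i] & ~cur_used.data[i]);
		if ((unused_update->data[i] & tgt_used.data[i]) !=
		    target->data[i])
			diff |= 1u << i;
	}
	return diff;
}

int main(void)
{
	struct toy_entry bypass = { { V_BIT | CFG_BYPASS } };
	struct toy_entry trans  = { { V_BIT | CFG_TRANS, 0x1000, 0x3 } };
	struct toy_entry scratch;

	/* Only qword 0 differs in used bits: the pointer qwords are
	 * ignored while in bypass, so the transition can be hitless. */
	printf("qwords needing a disruptive write: %#x\n",
	       (unsigned)toy_qword_diff(&bypass, &trans, &scratch));
	return 0;
}

A result with a single set bit takes the hitless path in arm_smmu_write_ste(); two or more set bits force the breaking V=0 sequence.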

Documentation/devicetree/bindings/iommu/arm,smmu.yaml

@@ -83,6 +83,7 @@ properties:
- description: Qcom Adreno GPUs implementing "qcom,smmu-500" and "arm,mmu-500"
items:
- enum:
- qcom,qcm2290-smmu-500
- qcom,sa8775p-smmu-500
- qcom,sc7280-smmu-500
- qcom,sc8280xp-smmu-500
@@ -93,6 +94,7 @@ properties:
- qcom,sm8350-smmu-500
- qcom,sm8450-smmu-500
- qcom,sm8550-smmu-500
- qcom,sm8650-smmu-500
- const: qcom,adreno-smmu
- const: qcom,smmu-500
- const: arm,mmu-500
@@ -462,6 +464,7 @@ allOf:
compatible:
items:
- enum:
- qcom,qcm2290-smmu-500
- qcom,sm6115-smmu-500
- qcom,sm6125-smmu-500
- const: qcom,adreno-smmu
@@ -484,7 +487,12 @@ allOf:
- if:
properties:
compatible:
const: qcom,sm8450-smmu-500
items:
- const: qcom,sm8450-smmu-500
- const: qcom,adreno-smmu
- const: qcom,smmu-500
- const: arm,mmu-500
then:
properties:
clock-names:
@@ -508,7 +516,13 @@ allOf:
- if:
properties:
compatible:
const: qcom,sm8550-smmu-500
items:
- enum:
- qcom,sm8550-smmu-500
- qcom,sm8650-smmu-500
- const: qcom,adreno-smmu
- const: qcom,smmu-500
- const: arm,mmu-500
then:
properties:
clock-names:
@@ -534,7 +548,6 @@ allOf:
- cavium,smmu-v2
- marvell,ap806-smmu-500
- nvidia,smmu-500
- qcom,qcm2290-smmu-500
- qcom,qdu1000-smmu-500
- qcom,sc7180-smmu-500
- qcom,sc8180x-smmu-500
@@ -544,7 +557,6 @@ allOf:
- qcom,sdx65-smmu-500
- qcom,sm6350-smmu-500
- qcom,sm6375-smmu-500
- qcom,sm8650-smmu-500
- qcom,x1e80100-smmu-500
then:
properties:

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c

@@ -292,10 +292,8 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
struct mm_struct *mm)
{
int ret;
unsigned long flags;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_mmu_notifier *smmu_mn;
struct arm_smmu_master *master;
list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
if (smmu_mn->mn.mm == mm) {
@@ -325,28 +323,9 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
goto err_free_cd;
}
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
cd);
if (ret) {
list_for_each_entry_from_reverse(
master, &smmu_domain->devices, domain_head)
arm_smmu_write_ctx_desc(
master, mm_get_enqcmd_pasid(mm), NULL);
break;
}
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
if (ret)
goto err_put_notifier;
list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
return smmu_mn;
err_put_notifier:
/* Frees smmu_mn */
mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
arm_smmu_free_shared_cd(cd);
return ERR_PTR(ret);
@@ -363,9 +342,6 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
list_del(&smmu_mn->list);
arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
NULL);
/*
* If we went through clear(), we've already invalidated, and no
* new TLB entry can have been formed.
@@ -381,13 +357,20 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
arm_smmu_free_shared_cd(cd);
}
static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
struct mm_struct *mm)
{
int ret;
struct arm_smmu_bond *bond;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_domain *smmu_domain;
if (!(domain->type & __IOMMU_DOMAIN_PAGING))
return -ENODEV;
smmu_domain = to_smmu_domain(domain);
if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
return -ENODEV;
if (!master || !master->sva_enabled)
return -ENODEV;
@@ -404,9 +387,15 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
goto err_free_bond;
}
ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
if (ret)
goto err_put_notifier;
list_add(&bond->list, &master->bonds);
return 0;
err_put_notifier:
arm_smmu_mmu_notifier_put(bond->smmu_mn);
err_free_bond:
kfree(bond);
return ret;
@@ -568,6 +557,9 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
mutex_lock(&sva_lock);
arm_smmu_write_ctx_desc(master, id, NULL);
list_for_each_entry(t, &master->bonds, list) {
if (t->mm == mm) {
bond = t;
@@ -590,7 +582,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct mm_struct *mm = domain->mm;
mutex_lock(&sva_lock);
ret = __arm_smmu_sva_bind(dev, mm);
ret = __arm_smmu_sva_bind(dev, id, mm);
mutex_unlock(&sva_lock);
return ret;

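A shape worth noting in the SVA rework above: __arm_smmu_sva_bind() now receives the PASID, writes the context descriptor once for that PASID, and unwinds each completed step on failure instead of walking the domain's device list. Below is a minimal compilable sketch of that goto-unwind pattern; every name is invented, and toy_write_cd() is hard-wired to fail so the unwind actually runs.

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for the bond/notifier/CD steps in the real code. */
struct toy_bond {
	int pasid;
};

static int toy_get_notifier(void)
{
	return 0;			/* pretend this step succeeds */
}

static void toy_put_notifier(void)
{
	puts("unwind: put notifier");
}

static int toy_write_cd(int pasid)
{
	(void)pasid;
	return -1;			/* forced failure to show the unwind */
}

static int toy_bind(int pasid)
{
	struct toy_bond *bond;
	int ret;

	bond = malloc(sizeof(*bond));
	if (!bond)
		return -1;
	bond->pasid = pasid;

	ret = toy_get_notifier();
	if (ret)
		goto err_free_bond;

	/* New in this series: the CD is written here, once, for this
	 * PASID, rather than by iterating the domain's device list. */
	ret = toy_write_cd(bond->pasid);
	if (ret)
		goto err_put_notifier;

	puts("bound");
	return 0;

err_put_notifier:
	toy_put_notifier();
err_free_bond:
	free(bond);
	return ret;
}

int main(void)
{
	printf("toy_bind() -> %d\n", toy_bind(1));
	return 0;
}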
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

@@ -48,6 +48,9 @@ enum arm_smmu_msi_index {
ARM_SMMU_MAX_MSIS,
};
static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu,
ioasid_t sid);
static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
[EVTQ_MSI_INDEX] = {
ARM_SMMU_EVTQ_IRQ_CFG0,
@@ -86,6 +89,9 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu);
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -971,6 +977,199 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
/*
* Based on the value of ent, report which bits of the STE the HW will access. It
* would be nice if this was complete according to the spec, but minimally it
* has to capture the bits this driver uses.
*/
static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
struct arm_smmu_ste *used_bits)
{
unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent->data[0]));
used_bits->data[0] = cpu_to_le64(STRTAB_STE_0_V);
if (!(ent->data[0] & cpu_to_le64(STRTAB_STE_0_V)))
return;
used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
/* S1 translates */
if (cfg & BIT(0)) {
used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
STRTAB_STE_0_S1CTXPTR_MASK |
STRTAB_STE_0_S1CDMAX);
used_bits->data[1] |=
cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
STRTAB_STE_1_EATS);
used_bits->data[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
}
/* S2 translates */
if (cfg & BIT(1)) {
used_bits->data[1] |=
cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG);
used_bits->data[2] |=
cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R);
used_bits->data[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
}
if (cfg == STRTAB_STE_0_CFG_BYPASS)
used_bits->data[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
}
/*
* Figure out if we can do a hitless update of entry to become target. Returns a
* bit mask where each set bit indicates a qword that must be updated disruptively.
* unused_update is an intermediate value of entry that has unused bits set to
* their new values.
*/
static u8 arm_smmu_entry_qword_diff(const struct arm_smmu_ste *entry,
const struct arm_smmu_ste *target,
struct arm_smmu_ste *unused_update)
{
struct arm_smmu_ste target_used = {};
struct arm_smmu_ste cur_used = {};
u8 used_qword_diff = 0;
unsigned int i;
arm_smmu_get_ste_used(entry, &cur_used);
arm_smmu_get_ste_used(target, &target_used);
for (i = 0; i != ARRAY_SIZE(target_used.data); i++) {
/*
* Check that masks are up to date: the make functions are not
* allowed to set a bit to 1 if the used function doesn't say it
* is used.
*/
WARN_ON_ONCE(target->data[i] & ~target_used.data[i]);
/* Bits can change because they are not currently being used */
unused_update->data[i] = (entry->data[i] & cur_used.data[i]) |
(target->data[i] & ~cur_used.data[i]);
/*
* Each bit indicates that a used bit in a qword needs to be
* changed after unused_update is applied.
*/
if ((unused_update->data[i] & target_used.data[i]) !=
target->data[i])
used_qword_diff |= 1 << i;
}
return used_qword_diff;
}
static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
struct arm_smmu_ste *entry,
const struct arm_smmu_ste *target, unsigned int start,
unsigned int len)
{
bool changed = false;
unsigned int i;
for (i = start; len != 0; len--, i++) {
if (entry->data[i] != target->data[i]) {
WRITE_ONCE(entry->data[i], target->data[i]);
changed = true;
}
}
if (changed)
arm_smmu_sync_ste_for_sid(smmu, sid);
return changed;
}
/*
* Update the STE/CD to the target configuration. The transition from the
* current entry to the target entry takes place over multiple steps that
* attempt to make the transition hitless if possible. This function takes care
* not to create a situation where the HW can perceive a corrupted entry. The HW
* is only required to treat 64-bit stores from the CPU as atomic, while
* entries span several 64-bit values.
*
* The difference between the current value and the target value is analyzed to
* determine which of three updates is required - disruptive, hitless or no
* change.
*
* In the most general, disruptive case we can make any update in three steps:
* - Disrupt the entry (V=0)
* - Fill the now-unused qwords, except qword 0 which contains V
* - Make qword 0 have the final value and be valid (V=1) with a single
* 64-bit store
*
* However, this disrupts the HW while it is happening. There are several
* interesting cases where an STE/CD can be updated without disturbing the HW
* because only a small number of bits are changing (S1DSS, CONFIG, etc) or
* because the used bits don't intersect. We can detect this by calculating how
* many 64-bit values need updating after adjusting the unused bits, and skip
* the V=0 process. This relies on the IGNORED behavior described in the
* specification.
*/
static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
struct arm_smmu_ste *entry,
const struct arm_smmu_ste *target)
{
unsigned int num_entry_qwords = ARRAY_SIZE(target->data);
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ste unused_update;
u8 used_qword_diff;
used_qword_diff =
arm_smmu_entry_qword_diff(entry, target, &unused_update);
if (hweight8(used_qword_diff) == 1) {
/*
* Only one qword needs its used bits to be changed. This is a
* hitless update: update all bits the current STE is ignoring
* to their new values, then update a single "critical qword" to
* change the STE, and finally zero out any bits that are now unused
* in the target configuration.
*/
unsigned int critical_qword_index = ffs(used_qword_diff) - 1;
/*
* Skip writing unused bits in the critical qword since we'll be
* writing it in the next step anyway. This can save a sync
* when the only change is in that qword.
*/
unused_update.data[critical_qword_index] =
entry->data[critical_qword_index];
entry_set(smmu, sid, entry, &unused_update, 0, num_entry_qwords);
entry_set(smmu, sid, entry, target, critical_qword_index, 1);
entry_set(smmu, sid, entry, target, 0, num_entry_qwords);
} else if (used_qword_diff) {
/*
* At least two qwords need their in-use bits to be changed. This
* requires a breaking update: zero the V bit, write all qwords
* but 0, then set qword 0.
*/
unused_update.data[0] = entry->data[0] & (~STRTAB_STE_0_V);
entry_set(smmu, sid, entry, &unused_update, 0, 1);
entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);
entry_set(smmu, sid, entry, target, 0, 1);
} else {
/*
* No in-use bit changed. Sanity check that all unused bits are 0
* in the entry. The target was already sanity checked by
* arm_smmu_entry_qword_diff().
*/
WARN_ON_ONCE(
entry_set(smmu, sid, entry, target, 0, num_entry_qwords));
}
/* It's likely that we'll want to use the new STE soon */
if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
struct arm_smmu_cmdq_ent
prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
.prefetch = {
.sid = sid,
} };
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
}
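In the disruptive branch above, the store ordering is what protects the HW: first invalidate, then fill everything except qword 0, then publish qword 0 with one 64-bit store. Here is a stand-alone sketch of that sequence over an invented two-qword entry (values are arbitrary; in the driver each step is followed by a CMD_SYNC, which the sketch only mentions in comments):

#include <stdint.h>
#include <stdio.h>

#define V_BIT 0x1ULL

struct toy_entry {
	uint64_t data[2];
};

static void show(const struct toy_entry *e, const char *step)
{
	printf("%-20s qw0=%#llx qw1=%#llx\n", step,
	       (unsigned long long)e->data[0],
	       (unsigned long long)e->data[1]);
}

int main(void)
{
	/* A live entry and an incompatible target: the used bits of both
	 * qwords change, so a hitless update is impossible. */
	struct toy_entry entry  = { { V_BIT | 0xa0, 0x111 } };
	struct toy_entry target = { { V_BIT | 0xc0, 0x222 } };

	show(&entry, "initial (live)");

	/* Step 1: clear V; the HW now ignores every other field (sync). */
	entry.data[0] &= ~V_BIT;
	show(&entry, "after V := 0");

	/* Step 2: fill every qword except qword 0 (sync). */
	entry.data[1] = target.data[1];
	show(&entry, "after qwords 1..n");

	/* Step 3: publish qword 0 - final value and V=1 - with a single
	 * 64-bit store (sync); the HW never sees a half-written entry. */
	entry.data[0] = target.data[0];
	show(&entry, "final");
	return 0;
}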
static void arm_smmu_sync_cd(struct arm_smmu_master *master,
int ssid, bool leaf)
{
@@ -1251,158 +1450,131 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
struct arm_smmu_ste *dst)
static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
{
/*
* This is hideously complicated, but we only really care about
* three cases at the moment:
*
* 1. Invalid (all zero) -> bypass/fault (init)
* 2. Bypass/fault -> translation/bypass (attach)
* 3. Translation/bypass -> bypass/fault (detach)
*
* Given that we can't update the STE atomically and the SMMU
* doesn't read the thing in a defined order, that leaves us
* with the following maintenance requirements:
*
* 1. Update Config, return (init time STEs aren't live)
* 2. Write everything apart from dword 0, sync, write dword 0, sync
* 3. Update Config, sync
*/
u64 val = le64_to_cpu(dst->data[0]);
bool ste_live = false;
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
struct arm_smmu_s2_cfg *s2_cfg = NULL;
struct arm_smmu_domain *smmu_domain = master->domain;
struct arm_smmu_cmdq_ent prefetch_cmd = {
.opcode = CMDQ_OP_PREFETCH_CFG,
.prefetch = {
.sid = sid,
},
};
if (smmu_domain) {
switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1:
cd_table = &master->cd_table;
break;
case ARM_SMMU_DOMAIN_S2:
s2_cfg = &smmu_domain->s2_cfg;
break;
default:
break;
}
}
if (val & STRTAB_STE_0_V) {
switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
case STRTAB_STE_0_CFG_BYPASS:
break;
case STRTAB_STE_0_CFG_S1_TRANS:
case STRTAB_STE_0_CFG_S2_TRANS:
ste_live = true;
break;
case STRTAB_STE_0_CFG_ABORT:
BUG_ON(!disable_bypass);
break;
default:
BUG(); /* STE corruption */
}
}
/* Nuke the existing STE_0 value, as we're going to rewrite it */
val = STRTAB_STE_0_V;
/* Bypass/fault */
if (!smmu_domain || !(cd_table || s2_cfg)) {
if (!smmu_domain && disable_bypass)
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
else
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
dst->data[0] = cpu_to_le64(val);
dst->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
STRTAB_STE_1_SHCFG_INCOMING));
dst->data[2] = 0; /* Nuke the VMID */
/*
* The SMMU can perform negative caching, so we must sync
* the STE regardless of whether the old value was live.
*/
if (smmu)
arm_smmu_sync_ste_for_sid(smmu, sid);
return;
}
if (cd_table) {
u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
BUG_ON(ste_live);
dst->data[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
FIELD_PREP(STRTAB_STE_1_STRW, strw));
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
!master->stall_enabled)
dst->data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) |
FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt);
}
if (s2_cfg) {
BUG_ON(ste_live);
dst->data[2] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
#ifdef __BIG_ENDIAN
STRTAB_STE_2_S2ENDI |
#endif
STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
STRTAB_STE_2_S2R);
dst->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
}
if (master->ats_enabled)
dst->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
STRTAB_STE_1_EATS_TRANS));
arm_smmu_sync_ste_for_sid(smmu, sid);
/* See comment in arm_smmu_write_ctx_desc() */
WRITE_ONCE(dst->data[0], cpu_to_le64(val));
arm_smmu_sync_ste_for_sid(smmu, sid);
/* It's likely that we'll want to use the new STE soon */
if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
}
static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
unsigned int nent, bool force)
static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target)
{
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));
target->data[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
}
static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
struct arm_smmu_master *master)
{
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
struct arm_smmu_device *smmu = master->smmu;
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) |
(cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax));
target->data[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
((smmu->features & ARM_SMMU_FEAT_STALLS &&
!master->stall_enabled) ?
STRTAB_STE_1_S1STALLD :
0) |
FIELD_PREP(STRTAB_STE_1_EATS,
master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
if (smmu->features & ARM_SMMU_FEAT_E2H) {
/*
* To support BTM, the streamworld needs to match the
* configuration of the CPU so that the ASID broadcasts are
* properly matched. This means either S/NS-EL2-E2H (hypervisor)
* or NS-EL1 (guest). Since an SVA domain can be installed in a
* PASID, this should always use a BTM-compatible configuration
* if the HW supports it.
*/
target->data[1] |= cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_EL2));
} else {
target->data[1] |= cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
/*
* VMID 0 is reserved for stage-2 bypass EL1 STEs, see
* arm_smmu_domain_finalise_s2()
*/
target->data[2] =
cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
}
}
static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
struct arm_smmu_master *master,
struct arm_smmu_domain *smmu_domain)
{
struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
const struct io_pgtable_cfg *pgtbl_cfg =
&io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =
&pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
u64 vtcr_val;
memset(target, 0, sizeof(*target));
target->data[0] = cpu_to_le64(
STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS));
target->data[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_EATS,
master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0) |
FIELD_PREP(STRTAB_STE_1_SHCFG,
STRTAB_STE_1_SHCFG_INCOMING));
vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
target->data[2] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
FIELD_PREP(STRTAB_STE_2_VTCR, vtcr_val) |
STRTAB_STE_2_S2AA64 |
#ifdef __BIG_ENDIAN
STRTAB_STE_2_S2ENDI |
#endif
STRTAB_STE_2_S2PTW |
STRTAB_STE_2_S2R);
target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
STRTAB_STE_3_S2TTB_MASK);
}
/*
* This can safely manipulate the STE memory directly, without a sync sequence,
* because the STE table has not been installed in the SMMU yet.
*/
static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
unsigned int nent)
{
unsigned int i;
u64 val = STRTAB_STE_0_V;
if (disable_bypass && !force)
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
else
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
strtab->data[0] = cpu_to_le64(val);
strtab->data[1] = cpu_to_le64(FIELD_PREP(
STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
strtab->data[2] = 0;
if (disable_bypass)
arm_smmu_make_abort_ste(strtab);
else
arm_smmu_make_bypass_ste(strtab);
strtab++;
}
}
@@ -1430,7 +1602,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -2025,15 +2197,15 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
if (type == IOMMU_DOMAIN_SVA)
return arm_smmu_sva_domain_alloc();
return ERR_PTR(-EOPNOTSUPP);
}
if (type != IOMMU_DOMAIN_UNMANAGED &&
type != IOMMU_DOMAIN_DMA &&
type != IOMMU_DOMAIN_IDENTITY)
return NULL;
static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
{
struct arm_smmu_domain *smmu_domain;
/*
* Allocate the domain and initialise some of its data structures.
@@ -2042,13 +2214,23 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
*/
smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
if (!smmu_domain)
return NULL;
return ERR_PTR(-ENOMEM);
mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
spin_lock_init(&smmu_domain->devices_lock);
INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
if (dev) {
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
int ret;
ret = arm_smmu_domain_finalise(smmu_domain, master->smmu);
if (ret) {
kfree(smmu_domain);
return ERR_PTR(ret);
}
}
return &smmu_domain->domain;
}
@@ -2074,12 +2256,12 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
kfree(smmu_domain);
}
static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int ret;
u32 asid;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
@@ -2111,13 +2293,12 @@ out_unlock:
return ret;
}
static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int vmid;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
/* Reserve VMID 0 for stage-2 bypass STEs */
vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
@@ -2125,35 +2306,21 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
if (vmid < 0)
return vmid;
vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
cfg->vmid = (u16)vmid;
cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
cfg->vtcr = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
return 0;
}
static int arm_smmu_domain_finalise(struct iommu_domain *domain)
static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu)
{
int ret;
unsigned long ias, oas;
enum io_pgtable_fmt fmt;
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable_ops *pgtbl_ops;
int (*finalise_stage_fn)(struct arm_smmu_domain *,
struct io_pgtable_cfg *);
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
if (domain->type == IOMMU_DOMAIN_IDENTITY) {
smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
return 0;
}
int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg);
/* Restrict the stage to what we can actually support */
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
@@ -2192,17 +2359,18 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
if (!pgtbl_ops)
return -ENOMEM;
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
domain->geometry.force_aperture = true;
smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
smmu_domain->domain.geometry.force_aperture = true;
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg);
if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
return ret;
}
smmu_domain->pgtbl_ops = pgtbl_ops;
smmu_domain->smmu = smmu;
return 0;
}
@@ -2225,7 +2393,8 @@ arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
}
}
static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master,
const struct arm_smmu_ste *target)
{
int i, j;
struct arm_smmu_device *smmu = master->smmu;
@@ -2242,7 +2411,7 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
if (j < i)
continue;
arm_smmu_write_strtab_ent(master, sid, step);
arm_smmu_write_ste(master, sid, step, target);
}
}
@@ -2261,12 +2430,12 @@ static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
}
static void arm_smmu_enable_ats(struct arm_smmu_master *master)
static void arm_smmu_enable_ats(struct arm_smmu_master *master,
struct arm_smmu_domain *smmu_domain)
{
size_t stu;
struct pci_dev *pdev;
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_domain *smmu_domain = master->domain;
/* Don't enable ATS at the endpoint if it's not enabled in the STE */
if (!master->ats_enabled)
@@ -2282,10 +2451,9 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
}
static void arm_smmu_disable_ats(struct arm_smmu_master *master)
static void arm_smmu_disable_ats(struct arm_smmu_master *master,
struct arm_smmu_domain *smmu_domain)
{
struct arm_smmu_domain *smmu_domain = master->domain;
if (!master->ats_enabled)
return;
@@ -2348,35 +2516,28 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
static void arm_smmu_detach_dev(struct arm_smmu_master *master)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(master->dev);
struct arm_smmu_domain *smmu_domain;
unsigned long flags;
struct arm_smmu_domain *smmu_domain = master->domain;
if (!smmu_domain)
if (!domain || !(domain->type & __IOMMU_DOMAIN_PAGING))
return;
arm_smmu_disable_ats(master);
smmu_domain = to_smmu_domain(domain);
arm_smmu_disable_ats(master, smmu_domain);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_del(&master->domain_head);
list_del_init(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
master->domain = NULL;
master->ats_enabled = false;
arm_smmu_install_ste_for_dev(master);
/*
* Clearing the CD entry isn't strictly required to detach the domain
* since the table is uninstalled anyway, but it helps avoid confusion
* in the call to arm_smmu_write_ctx_desc on the next attach (which
* expects the entry to be empty).
*/
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && master->cd_table.cdtab)
arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
unsigned long flags;
struct arm_smmu_ste target;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2398,15 +2559,10 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -EBUSY;
}
arm_smmu_detach_dev(master);
mutex_lock(&smmu_domain->init_mutex);
if (!smmu_domain->smmu) {
smmu_domain->smmu = smmu;
ret = arm_smmu_domain_finalise(domain);
if (ret)
smmu_domain->smmu = NULL;
ret = arm_smmu_domain_finalise(smmu_domain, smmu);
} else if (smmu_domain->smmu != smmu)
ret = -EINVAL;
@@ -2414,57 +2570,140 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (ret)
return ret;
master->domain = smmu_domain;
/*
* The SMMU does not support enabling ATS with bypass. When the STE is
* in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests and
* Translated transactions are denied as though ATS is disabled for the
* stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
* F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
* Prevent arm_smmu_share_asid() from trying to change the ASID
* of either the old or new domain while we are working on it.
* This allows the STE and the smmu_domain->devices list to
* be inconsistent during this routine.
*/
if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
master->ats_enabled = arm_smmu_ats_supported(master);
mutex_lock(&arm_smmu_asid_lock);
arm_smmu_detach_dev(master);
master->ats_enabled = arm_smmu_ats_supported(master);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_add(&master->domain_head, &smmu_domain->devices);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1:
if (!master->cd_table.cdtab) {
ret = arm_smmu_alloc_cd_tables(master);
if (ret) {
master->domain = NULL;
if (ret)
goto out_list_del;
} else {
/*
* arm_smmu_write_ctx_desc() relies on the entry being
* invalid to work; clear any existing entry.
*/
ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
NULL);
if (ret)
goto out_list_del;
}
}
/*
* Prevent SVA from concurrently modifying the CD or writing to
* the CD entry
*/
mutex_lock(&arm_smmu_asid_lock);
ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
mutex_unlock(&arm_smmu_asid_lock);
if (ret) {
master->domain = NULL;
if (ret)
goto out_list_del;
}
arm_smmu_make_cdtable_ste(&target, master);
arm_smmu_install_ste_for_dev(master, &target);
break;
case ARM_SMMU_DOMAIN_S2:
arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);
arm_smmu_install_ste_for_dev(master, &target);
if (master->cd_table.cdtab)
arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
NULL);
break;
}
arm_smmu_install_ste_for_dev(master);
arm_smmu_enable_ats(master);
return 0;
arm_smmu_enable_ats(master, smmu_domain);
goto out_unlock;
out_list_del:
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_del(&master->domain_head);
list_del_init(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
out_unlock:
mutex_unlock(&arm_smmu_asid_lock);
return ret;
}
static int arm_smmu_attach_dev_ste(struct device *dev,
struct arm_smmu_ste *ste)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
if (arm_smmu_master_sva_enabled(master))
return -EBUSY;
/*
* Do not allow any ASID to be changed while we are working on the STE,
* otherwise we could miss invalidations.
*/
mutex_lock(&arm_smmu_asid_lock);
/*
* The SMMU does not support enabling ATS with bypass/abort. When the
* STE is in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests
* and Translated transactions are denied as though ATS is disabled for
* the stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
* F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
*/
arm_smmu_detach_dev(master);
arm_smmu_install_ste_for_dev(master, ste);
mutex_unlock(&arm_smmu_asid_lock);
/*
* This has to be done after removing the master from the
* arm_smmu_domain->devices to avoid races updating the same context
* descriptor from arm_smmu_share_asid().
*/
if (master->cd_table.cdtab)
arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
return 0;
}
static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
struct device *dev)
{
struct arm_smmu_ste ste;
arm_smmu_make_bypass_ste(&ste);
return arm_smmu_attach_dev_ste(dev, &ste);
}
static const struct iommu_domain_ops arm_smmu_identity_ops = {
.attach_dev = arm_smmu_attach_dev_identity,
};
static struct iommu_domain arm_smmu_identity_domain = {
.type = IOMMU_DOMAIN_IDENTITY,
.ops = &arm_smmu_identity_ops,
};
static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
struct device *dev)
{
struct arm_smmu_ste ste;
arm_smmu_make_abort_ste(&ste);
return arm_smmu_attach_dev_ste(dev, &ste);
}
static const struct iommu_domain_ops arm_smmu_blocked_ops = {
.attach_dev = arm_smmu_attach_dev_blocked,
};
static struct iommu_domain arm_smmu_blocked_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &arm_smmu_blocked_ops,
};
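The identity and blocked attach paths above rely on two file-scope singleton domains; because neither carries per-device state, one instance serves every device, and attach_dev can be routed through their ops without any allocation. A toy model of that pattern (invented names, not the real iommu core types):

#include <stdio.h>

struct toy_domain_ops {
	int (*attach)(const char *dev_name);
};

struct toy_domain {
	const char *type;
	const struct toy_domain_ops *ops;
};

static int toy_attach_identity(const char *dev_name)
{
	printf("%s: STE -> bypass\n", dev_name);
	return 0;
}

static int toy_attach_blocked(const char *dev_name)
{
	printf("%s: STE -> abort\n", dev_name);
	return 0;
}

static const struct toy_domain_ops toy_identity_ops = {
	.attach = toy_attach_identity,
};
static const struct toy_domain_ops toy_blocked_ops = {
	.attach = toy_attach_blocked,
};

/* Singletons: stateless, so a single instance serves all devices. */
static const struct toy_domain toy_identity_domain = {
	.type = "identity",
	.ops = &toy_identity_ops,
};
static const struct toy_domain toy_blocked_domain = {
	.type = "blocked",
	.ops = &toy_blocked_ops,
};

int main(void)
{
	/* The core would pick the domain; here we attach two devices. */
	toy_identity_domain.ops->attach("gpu0");
	toy_blocked_domain.ops->attach("nic0");
	return 0;
}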
static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -2658,6 +2897,7 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
master->dev = dev;
master->smmu = smmu;
INIT_LIST_HEAD(&master->bonds);
INIT_LIST_HEAD(&master->domain_head);
dev_iommu_priv_set(dev, master);
ret = arm_smmu_insert_master(smmu, master);
@@ -2699,7 +2939,13 @@ static void arm_smmu_release_device(struct device *dev)
if (WARN_ON(arm_smmu_master_sva_enabled(master)))
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
arm_smmu_detach_dev(master);
/* Put the STE back to what arm_smmu_init_strtab() sets */
if (disable_bypass && !dev->iommu->require_direct)
arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
else
arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev);
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
if (master->cd_table.cdtab)
@@ -2844,8 +3090,11 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
}
static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
.domain_alloc_paging = arm_smmu_domain_alloc_paging,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
@@ -3049,7 +3298,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents);
return 0;
}
@@ -3760,7 +4009,6 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
list_for_each_entry(e, &rmr_list, list) {
struct arm_smmu_ste *step;
struct iommu_iort_rmr_data *rmr;
int ret, i;
@@ -3773,8 +4021,12 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
continue;
}
step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
arm_smmu_init_bypass_stes(step, 1, true);
/*
* The STE table is not yet programmed to the HW, see
* arm_smmu_init_initial_stes()
*/
arm_smmu_make_bypass_ste(
arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
}
}

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h

@@ -609,8 +609,6 @@ struct arm_smmu_ctx_desc_cfg {
struct arm_smmu_s2_cfg {
u16 vmid;
u64 vttbr;
u64 vtcr;
};
struct arm_smmu_strtab_cfg {
@@ -697,7 +695,6 @@ struct arm_smmu_stream {
struct arm_smmu_master {
struct arm_smmu_device *smmu;
struct device *dev;
struct arm_smmu_domain *domain;
struct list_head domain_head;
struct arm_smmu_stream *streams;
/* Locked by the iommu core using the group mutex */
@@ -715,7 +712,6 @@ struct arm_smmu_master {
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {

drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c

@@ -260,6 +260,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sm6375-mdss" },
{ .compatible = "qcom,sm8150-mdss" },
{ .compatible = "qcom,sm8250-mdss" },
{ .compatible = "qcom,x1e80100-mdss" },
{ }
};

drivers/iommu/arm/arm-smmu/arm-smmu.c

@@ -859,10 +859,14 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
arm_smmu_rpm_put(smmu);
}
static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
if (type != IOMMU_DOMAIN_UNMANAGED) {
if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
return NULL;
}
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -875,15 +879,6 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->cb_lock);
if (dev) {
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) {
kfree(smmu_domain);
return NULL;
}
}
return &smmu_domain->domain;
}
@@ -1600,7 +1595,7 @@ static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.domain_alloc_paging = arm_smmu_domain_alloc_paging,
.domain_alloc = arm_smmu_domain_alloc,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,