Merge tag 'arm-smmu-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into iommu/fixes

Arm SMMU fixes for 5.18

- Fix off-by-one in SMMUv3 SVA TLB invalidation
- Disable large mappings to work around NVIDIA erratum
commit e6f48bed2c
@@ -183,7 +183,14 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
 {
 	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
 	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
-	size_t size = end - start + 1;
+	size_t size;
+
+	/*
+	 * The mm_types defines vm_end as the first byte after the end address,
+	 * different from IOMMU subsystem using the last address of an address
+	 * range. So do a simple translation here by calculating size correctly.
+	 */
+	size = end - start;
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
 		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
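The hunk above fixes an off-by-one: mmu_notifier ranges follow mm semantics, where end points at the first byte after the range, so the size of [start, end) is simply end - start. The old "+ 1" treated end as the last byte of the range. A minimal user-space sketch of the two conventions (illustrative only, not part of this commit; the range_size_* helper names are made up):

	#include <assert.h>
	#include <stddef.h>

	/* Exclusive-end convention (mm): end is the first byte after the range. */
	static size_t range_size_excl(unsigned long start, unsigned long end)
	{
		return end - start;
	}

	/* Inclusive-end convention (what the old "+ 1" assumed): end is the last byte. */
	static size_t range_size_incl(unsigned long start, unsigned long end)
	{
		return end - start + 1;
	}

	int main(void)
	{
		/* One 4 KiB page at 0x1000 is [0x1000, 0x2000) in mm terms... */
		assert(range_size_excl(0x1000, 0x2000) == 0x1000);
		/* ...so applying the inclusive formula to it over-counts by one byte. */
		assert(range_size_incl(0x1000, 0x2000) == 0x1001);
		return 0;
	}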
@@ -258,6 +258,34 @@ static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct devi
 			dev_name(dev), err);
 }
 
+static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
+				    struct io_pgtable_cfg *pgtbl_cfg,
+				    struct device *dev)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	const struct device_node *np = smmu->dev->of_node;
+
+	/*
+	 * Tegra194 and Tegra234 SoCs have the erratum that causes walk cache
+	 * entries to not be invalidated correctly. The problem is that the walk
+	 * cache index generated for IOVA is not same across translation and
+	 * invalidation requests. This is leading to page faults when PMD entry
+	 * is released during unmap and populated with new PTE table during
+	 * subsequent map request. Disabling large page mappings avoids the
+	 * release of PMD entry and avoid translations seeing stale PMD entry in
+	 * walk cache.
+	 * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
+	 * Tegra234.
+	 */
+	if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
+	    of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
+		smmu->pgsize_bitmap = PAGE_SIZE;
+		pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
+	}
+
+	return 0;
+}
+
 static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.read_reg = nvidia_smmu_read_reg,
 	.write_reg = nvidia_smmu_write_reg,
@@ -268,10 +296,12 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.global_fault = nvidia_smmu_global_fault,
 	.context_fault = nvidia_smmu_context_fault,
 	.probe_finalize = nvidia_smmu_probe_finalize,
+	.init_context = nvidia_smmu_init_context,
 };
 
 static const struct arm_smmu_impl nvidia_smmu_single_impl = {
 	.probe_finalize = nvidia_smmu_probe_finalize,
+	.init_context = nvidia_smmu_init_context,
 };
 
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)