Merge branch 'for-next/iommu/svm' into for-next/iommu/core

More steps along the way to Shared Virtual {Addressing, Memory} support
for Arm's SMMUv3, including the addition of a helper library that can be
shared amongst other IOMMU implementations wishing to support this
feature.

* for-next/iommu/svm:
  iommu/arm-smmu-v3: Hook up ATC invalidation to mm ops
  iommu/arm-smmu-v3: Implement iommu_sva_bind/unbind()
  iommu/sva: Add PASID helpers
  iommu/ioasid: Add ioasid references
Will Deacon 2020-12-08 15:07:49 +00:00
commit a5f12de3ec
11 changed files with 469 additions and 24 deletions
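For orientation, here is how a client device driver would consume the feature once these patches are in place. This is a minimal sketch against the existing driver-facing API in <linux/iommu.h> (iommu_dev_enable_feature(), iommu_sva_bind_device() and friends, which dispatch to the sva_bind/sva_unbind ops wired up below); my_dev_bind_current_mm() is a hypothetical driver hook and error handling is abbreviated:

#include <linux/iommu.h>
#include <linux/sched.h>

static int my_dev_bind_current_mm(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	/* Opt in to SVA; fails unless the IOMMU driver supports it. */
	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
		return -ENODEV;

	/* Share the current process address space with the device. */
	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Tag the device's DMA with this PASID; the IOMMU walks the mm. */
	pasid = iommu_sva_get_pasid(handle);

	/* ... program the PASID into the device, issue DMA ... */

	iommu_sva_unbind_device(handle);
	return 0;
}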

drivers/iommu/Kconfig

@@ -103,6 +103,11 @@ config IOMMU_DMA
select IRQ_MSI_IOMMU
select NEED_SG_DMA_LENGTH
# Shared Virtual Addressing library
config IOMMU_SVA_LIB
bool
select IOASID
config FSL_PAMU
bool "Freescale IOMMU support"
depends on PCI
@@ -311,6 +316,8 @@ config ARM_SMMU_V3
config ARM_SMMU_V3_SVA
bool "Shared Virtual Addressing support for the ARM SMMUv3"
depends on ARM_SMMU_V3
select IOMMU_SVA_LIB
select MMU_NOTIFIER
help
Support for sharing process address spaces with devices using the
SMMUv3.

drivers/iommu/Makefile

@@ -27,3 +27,4 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c

@@ -5,11 +5,35 @@
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>
#include "arm-smmu-v3.h"
#include "../../iommu-sva-lib.h"
#include "../../io-pgtable-arm.h"
struct arm_smmu_mmu_notifier {
struct mmu_notifier mn;
struct arm_smmu_ctx_desc *cd;
bool cleared;
refcount_t refs;
struct list_head list;
struct arm_smmu_domain *domain;
};
#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)
struct arm_smmu_bond {
struct iommu_sva sva;
struct mm_struct *mm;
struct arm_smmu_mmu_notifier *smmu_mn;
struct list_head list;
refcount_t refs;
};
#define sva_to_bond(handle) \
container_of(handle, struct arm_smmu_bond, sva)
static DEFINE_MUTEX(sva_lock);
/*
@@ -64,7 +88,6 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
return NULL;
}
- __maybe_unused
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
u16 asid;
@@ -145,7 +168,6 @@ out_put_context:
return err < 0 ? ERR_PTR(err) : ret;
}
- __maybe_unused
static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
if (arm_smmu_free_asid(cd)) {
@@ -155,6 +177,215 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
}
}
static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
arm_smmu_atc_inv_domain(smmu_mn->domain, mm->pasid, start,
end - start + 1);
}
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
mutex_lock(&sva_lock);
if (smmu_mn->cleared) {
mutex_unlock(&sva_lock);
return;
}
/*
* DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
* but disable translation.
*/
arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
smmu_mn->cleared = true;
mutex_unlock(&sva_lock);
}
static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
kfree(mn_to_smmu(mn));
}
static struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
.invalidate_range = arm_smmu_mm_invalidate_range,
.release = arm_smmu_mm_release,
.free_notifier = arm_smmu_mmu_notifier_free,
};
/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
struct mm_struct *mm)
{
int ret;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_mmu_notifier *smmu_mn;
list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
if (smmu_mn->mn.mm == mm) {
refcount_inc(&smmu_mn->refs);
return smmu_mn;
}
}
cd = arm_smmu_alloc_shared_cd(mm);
if (IS_ERR(cd))
return ERR_CAST(cd);
smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
if (!smmu_mn) {
ret = -ENOMEM;
goto err_free_cd;
}
refcount_set(&smmu_mn->refs, 1);
smmu_mn->cd = cd;
smmu_mn->domain = smmu_domain;
smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;
ret = mmu_notifier_register(&smmu_mn->mn, mm);
if (ret) {
kfree(smmu_mn);
goto err_free_cd;
}
ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
if (ret)
goto err_put_notifier;
list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
return smmu_mn;
err_put_notifier:
/* Frees smmu_mn */
mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
arm_smmu_free_shared_cd(cd);
return ERR_PTR(ret);
}
static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
struct mm_struct *mm = smmu_mn->mn.mm;
struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
if (!refcount_dec_and_test(&smmu_mn->refs))
return;
list_del(&smmu_mn->list);
arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);
/*
* If we went through clear(), we've already invalidated, and no
* new TLB entry can have been formed.
*/
if (!smmu_mn->cleared) {
arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
}
/* Frees smmu_mn */
mmu_notifier_put(&smmu_mn->mn);
arm_smmu_free_shared_cd(cd);
}
static struct iommu_sva *
__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
int ret;
struct arm_smmu_bond *bond;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (!master || !master->sva_enabled)
return ERR_PTR(-ENODEV);
/* If bind() was already called for this {dev, mm} pair, reuse it. */
list_for_each_entry(bond, &master->bonds, list) {
if (bond->mm == mm) {
refcount_inc(&bond->refs);
return &bond->sva;
}
}
bond = kzalloc(sizeof(*bond), GFP_KERNEL);
if (!bond)
return ERR_PTR(-ENOMEM);
/* Allocate a PASID for this mm if necessary */
ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1);
if (ret)
goto err_free_bond;
bond->mm = mm;
bond->sva.dev = dev;
refcount_set(&bond->refs, 1);
bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
if (IS_ERR(bond->smmu_mn)) {
ret = PTR_ERR(bond->smmu_mn);
goto err_free_pasid;
}
list_add(&bond->list, &master->bonds);
return &bond->sva;
err_free_pasid:
iommu_sva_free_pasid(mm);
err_free_bond:
kfree(bond);
return ERR_PTR(ret);
}
struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
struct iommu_sva *handle;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
return ERR_PTR(-EINVAL);
mutex_lock(&sva_lock);
handle = __arm_smmu_sva_bind(dev, mm);
mutex_unlock(&sva_lock);
return handle;
}
void arm_smmu_sva_unbind(struct iommu_sva *handle)
{
struct arm_smmu_bond *bond = sva_to_bond(handle);
mutex_lock(&sva_lock);
if (refcount_dec_and_test(&bond->refs)) {
list_del(&bond->list);
arm_smmu_mmu_notifier_put(bond->smmu_mn);
iommu_sva_free_pasid(bond->mm);
kfree(bond);
}
mutex_unlock(&sva_lock);
}
u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
{
struct arm_smmu_bond *bond = sva_to_bond(handle);
return bond->mm->pasid;
}
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
unsigned long reg, fld;
@@ -246,3 +477,12 @@ int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
return 0;
}
void arm_smmu_sva_notifier_synchronize(void)
{
/*
* Some MMU notifiers may still be waiting to be freed, using
* arm_smmu_mmu_notifier_free(). Wait for them.
*/
mmu_notifier_synchronize();
}
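One detail worth calling out in the bind path above: binds are refcounted per {dev, mm} pair, so repeated binds return the existing handle rather than a new bond. A sketch of the resulting semantics (handles obtained through the core API, which routes to __arm_smmu_sva_bind()):

	h1 = iommu_sva_bind_device(dev, current->mm, NULL);
	h2 = iommu_sva_bind_device(dev, current->mm, NULL);
	/* h2 == h1: the existing bond's refcount went 1 -> 2 */
	iommu_sva_unbind_device(h2);	/* bond and PASID kept, refs 2 -> 1 */
	iommu_sva_unbind_device(h1);	/* bond freed, PASID reference dropped */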

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

@@ -76,6 +76,12 @@ struct arm_smmu_option_prop {
DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa);
DEFINE_MUTEX(arm_smmu_asid_lock);
/*
* Special value used by SVA when a process dies, to quiesce a CD without
* disabling it.
*/
struct arm_smmu_ctx_desc quiet_cd = { 0 };
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
@@ -91,11 +97,6 @@ static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
return smmu->base + offset;
}
- static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
- {
- return container_of(dom, struct arm_smmu_domain, domain);
- }
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -983,7 +984,9 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
* (2) Install a secondary CD, for SID+SSID traffic.
* (3) Update ASID of a CD. Atomically write the first 64 bits of the
* CD, then invalidate the old entry and mappings.
- * (4) Remove a secondary CD.
+ * (4) Quiesce the context without clearing the valid bit. Disable
+ *     translation, and ignore any translation fault.
+ * (5) Remove a secondary CD.
*/
u64 val;
bool cd_live;
@@ -1000,8 +1003,10 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
val = le64_to_cpu(cdptr[0]);
cd_live = !!(val & CTXDESC_CD_0_V);
- if (!cd) { /* (4) */
+ if (!cd) { /* (5) */
val = 0;
+ } else if (cd == &quiet_cd) { /* (4) */
+ val |= CTXDESC_CD_0_TCR_EPD0;
} else if (cd_live) { /* (3) */
val &= ~CTXDESC_CD_0_ASID;
val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
@@ -1519,6 +1524,20 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
size_t inval_grain_shift = 12;
unsigned long page_start, page_end;
/*
* ATS and PASID:
*
* If substream_valid is clear, the PCIe TLP is sent without a PASID
* prefix. In that case all ATC entries within the address range are
* invalidated, including those that were requested with a PASID! There
* is no way to invalidate only entries without PASID.
*
* When using STRTAB_STE_1_S1DSS_SSID0 (reserving CD 0 for non-PASID
* traffic), translation requests without PASID create ATC entries
* without PASID, which must be invalidated with substream_valid clear.
* This has the unpleasant side-effect of invalidating all PASID-tagged
* ATC entries within the address range.
*/
*cmd = (struct arm_smmu_cmdq_ent) {
.opcode = CMDQ_OP_ATC_INV,
.substream_valid = !!ssid,
@@ -1577,8 +1596,8 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
return arm_smmu_cmdq_issue_sync(master->smmu);
}
- static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
- int ssid, unsigned long iova, size_t size)
+ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
+ unsigned long iova, size_t size)
{
int i;
unsigned long flags;
@@ -1794,6 +1813,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
spin_lock_init(&smmu_domain->devices_lock);
INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
return &smmu_domain->domain;
}
@@ -2589,6 +2609,9 @@ static struct iommu_ops arm_smmu_ops = {
.dev_feat_enabled = arm_smmu_dev_feature_enabled,
.dev_enable_feat = arm_smmu_dev_enable_feature,
.dev_disable_feat = arm_smmu_dev_disable_feature,
.sva_bind = arm_smmu_sva_bind,
.sva_unbind = arm_smmu_sva_unbind,
.sva_get_pasid = arm_smmu_sva_get_pasid,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -3611,6 +3634,12 @@ static const struct of_device_id arm_smmu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
static void arm_smmu_driver_unregister(struct platform_driver *drv)
{
arm_smmu_sva_notifier_synchronize();
platform_driver_unregister(drv);
}
static struct platform_driver arm_smmu_driver = {
.driver = {
.name = "arm-smmu-v3",
@@ -3621,7 +3650,8 @@ static struct platform_driver arm_smmu_driver = {
.remove = arm_smmu_device_remove,
.shutdown = arm_smmu_device_shutdown,
};
- module_platform_driver(arm_smmu_driver);
+ module_driver(arm_smmu_driver, platform_driver_register,
+ arm_smmu_driver_unregister);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will@kernel.org>");
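The module_driver() switch above matters for the unload path: MMU notifiers are freed through RCU, so the module must wait for outstanding arm_smmu_mmu_notifier_free() callbacks before its text goes away. Roughly (a sketch of what the macro expands to here, not verbatim kernel source), the custom unregister hook runs that synchronization before unregistering the driver:

static int __init arm_smmu_driver_init(void)
{
	return platform_driver_register(&arm_smmu_driver);
}
module_init(arm_smmu_driver_init);

static void __exit arm_smmu_driver_exit(void)
{
	/* Waits via arm_smmu_sva_notifier_synchronize(), then unregisters. */
	arm_smmu_driver_unregister(&arm_smmu_driver);
}
module_exit(arm_smmu_driver_exit);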

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h

@@ -678,15 +678,25 @@ struct arm_smmu_domain {
struct list_head devices;
spinlock_t devices_lock;
struct list_head mmu_notifiers;
};
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
}
extern struct xarray arm_smmu_asid_xa;
extern struct mutex arm_smmu_asid_lock;
extern struct arm_smmu_ctx_desc quiet_cd;
int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
struct arm_smmu_ctx_desc *cd);
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
unsigned long iova, size_t size);
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
@@ -694,6 +704,11 @@ bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
struct iommu_sva *arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm,
void *drvdata);
void arm_smmu_sva_unbind(struct iommu_sva *handle);
u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle);
void arm_smmu_sva_notifier_synchronize(void);
#else /* CONFIG_ARM_SMMU_V3_SVA */
static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
@@ -719,5 +734,20 @@ static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
return -ENODEV;
}
static inline struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
return ERR_PTR(-ENODEV);
}
static inline void arm_smmu_sva_unbind(struct iommu_sva *handle) {}
static inline u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
{
return IOMMU_PASID_INVALID;
}
static inline void arm_smmu_sva_notifier_synchronize(void) {}
#endif /* CONFIG_ARM_SMMU_V3_SVA */
#endif /* _ARM_SMMU_V3_H */

drivers/iommu/intel/iommu.c

@@ -5191,7 +5191,7 @@ static void auxiliary_unlink_device(struct dmar_domain *domain,
domain->auxd_refcnt--;
if (!domain->auxd_refcnt && domain->default_pasid > 0)
- ioasid_free(domain->default_pasid);
+ ioasid_put(domain->default_pasid);
}
static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -5252,7 +5252,7 @@ attach_failed:
spin_unlock(&iommu->lock);
spin_unlock_irqrestore(&device_domain_lock, flags);
if (!domain->auxd_refcnt && domain->default_pasid > 0)
- ioasid_free(domain->default_pasid);
+ ioasid_put(domain->default_pasid);
return ret;
}

drivers/iommu/intel/svm.c

@@ -598,7 +598,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
if (mm) {
ret = mmu_notifier_register(&svm->notifier, mm);
if (ret) {
- ioasid_free(svm->pasid);
+ ioasid_put(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
@@ -616,7 +616,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
if (ret) {
if (mm)
mmu_notifier_unregister(&svm->notifier, mm);
- ioasid_free(svm->pasid);
+ ioasid_put(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
@@ -689,7 +689,7 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
- ioasid_free(svm->pasid);
+ ioasid_put(svm->pasid);
if (svm->mm) {
mmu_notifier_unregister(&svm->notifier, svm->mm);
/* Clear mm's pasid. */

drivers/iommu/ioasid.c

@@ -2,7 +2,7 @@
/*
* I/O Address Space ID allocator. There is one global IOASID space, split into
* subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
- * free IOASIDs with ioasid_alloc and ioasid_free.
+ * free IOASIDs with ioasid_alloc and ioasid_put.
*/
#include <linux/ioasid.h>
#include <linux/module.h>
@@ -15,6 +15,7 @@ struct ioasid_data {
struct ioasid_set *set;
void *private;
struct rcu_head rcu;
refcount_t refs;
};
/*
@@ -314,6 +315,7 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
data->set = set;
data->private = private;
refcount_set(&data->refs, 1);
/*
* Custom allocator needs allocator data to perform platform specific
@@ -346,13 +348,36 @@ exit_free:
EXPORT_SYMBOL_GPL(ioasid_alloc);
/**
- * ioasid_free - Free an IOASID
- * @ioasid: the ID to remove
+ * ioasid_get - obtain a reference to the IOASID
*/
- void ioasid_free(ioasid_t ioasid)
+ void ioasid_get(ioasid_t ioasid)
{
struct ioasid_data *ioasid_data;
spin_lock(&ioasid_allocator_lock);
ioasid_data = xa_load(&active_allocator->xa, ioasid);
if (ioasid_data)
refcount_inc(&ioasid_data->refs);
else
WARN_ON(1);
spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_get);
/**
* ioasid_put - Release a reference to an ioasid
* @ioasid: the ID to remove
*
* Put a reference to the IOASID, free it when the number of references drops to
* zero.
*
* Return: %true if the IOASID was freed, %false otherwise.
*/
bool ioasid_put(ioasid_t ioasid)
{
bool free = false;
struct ioasid_data *ioasid_data;
spin_lock(&ioasid_allocator_lock);
ioasid_data = xa_load(&active_allocator->xa, ioasid);
if (!ioasid_data) {
@@ -360,6 +385,10 @@ void ioasid_free(ioasid_t ioasid)
goto exit_unlock;
}
free = refcount_dec_and_test(&ioasid_data->refs);
if (!free)
goto exit_unlock;
active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
/* Custom allocator needs additional steps to free the xa element */
if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
@@ -369,8 +398,9 @@ void ioasid_free(ioasid_t ioasid)
exit_unlock:
spin_unlock(&ioasid_allocator_lock);
return free;
}
- EXPORT_SYMBOL_GPL(ioasid_free);
+ EXPORT_SYMBOL_GPL(ioasid_put);
/**
* ioasid_find - Find IOASID data
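To summarize the new lifetime rules, a worked example (my_set, max and private are placeholders; ioasid_alloc() initializes the count to 1, as shown above):

	ioasid_t id = ioasid_alloc(&my_set, 1, max, private);	/* refs == 1 */

	ioasid_get(id);		/* refs == 2 */
	ioasid_put(id);		/* returns false: refs == 1, IOASID kept */
	ioasid_put(id);		/* returns true: refs == 0, IOASID freed */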

drivers/iommu/iommu-sva-lib.c

@@ -0,0 +1,86 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Helpers for IOMMU drivers implementing SVA
*/
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include "iommu-sva-lib.h"
static DEFINE_MUTEX(iommu_sva_lock);
static DECLARE_IOASID_SET(iommu_sva_pasid);
/**
* iommu_sva_alloc_pasid - Allocate a PASID for the mm
* @mm: the mm
* @min: minimum PASID value (inclusive)
* @max: maximum PASID value (inclusive)
*
* Try to allocate a PASID for this mm, or take a reference to the existing one
* provided it fits within the [@min, @max] range. On success the PASID is
* available in mm->pasid, and must be released with iommu_sva_free_pasid().
* @min must be greater than 0, because 0 indicates an unused mm->pasid.
*
* Returns 0 on success and < 0 on error.
*/
int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
{
int ret = 0;
ioasid_t pasid;
if (min == INVALID_IOASID || max == INVALID_IOASID ||
min == 0 || max < min)
return -EINVAL;
mutex_lock(&iommu_sva_lock);
if (mm->pasid) {
if (mm->pasid >= min && mm->pasid <= max)
ioasid_get(mm->pasid);
else
ret = -EOVERFLOW;
} else {
pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
if (pasid == INVALID_IOASID)
ret = -ENOMEM;
else
mm->pasid = pasid;
}
mutex_unlock(&iommu_sva_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
/**
* iommu_sva_free_pasid - Release the mm's PASID
* @mm: the mm
*
* Drop one reference to a PASID allocated with iommu_sva_alloc_pasid()
*/
void iommu_sva_free_pasid(struct mm_struct *mm)
{
mutex_lock(&iommu_sva_lock);
if (ioasid_put(mm->pasid))
mm->pasid = 0;
mutex_unlock(&iommu_sva_lock);
}
EXPORT_SYMBOL_GPL(iommu_sva_free_pasid);
/* ioasid_find getter() requires a void * argument */
static bool __mmget_not_zero(void *mm)
{
return mmget_not_zero(mm);
}
/**
* iommu_sva_find() - Find mm associated to the given PASID
* @pasid: Process Address Space ID assigned to the mm
*
* On success a reference to the mm is taken, and must be released with mmput().
*
* Returns the mm corresponding to this PASID, or an error if not found.
*/
struct mm_struct *iommu_sva_find(ioasid_t pasid)
{
return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero);
}
EXPORT_SYMBOL_GPL(iommu_sva_find);
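Put together, a driver implementing SVA would lean on these helpers roughly as the SMMUv3 code earlier in this series does (a condensed sketch, not a drop-in implementation; ssid_bits and the surrounding driver code are placeholders, and error handling is abbreviated):

	/* bind: allocate, or take a reference to, the mm's PASID */
	ret = iommu_sva_alloc_pasid(mm, 1, (1U << ssid_bits) - 1);
	if (ret)
		return ERR_PTR(ret);
	/* ... install mm->pasid in the device's PASID table ... */

	/* unbind: tear down the PASID table entry, then drop the reference */
	iommu_sva_free_pasid(mm);

	/* I/O page fault handler: resolve a faulting PASID back to its mm */
	struct mm_struct *mm = iommu_sva_find(pasid);
	if (!IS_ERR_OR_NULL(mm)) {
		/* ... fix up the fault under mm, e.g. handle_mm_fault() ... */
		mmput(mm);
	}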

drivers/iommu/iommu-sva-lib.h

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* SVA library for IOMMU drivers
*/
#ifndef _IOMMU_SVA_LIB_H
#define _IOMMU_SVA_LIB_H
#include <linux/ioasid.h>
#include <linux/mm_types.h>
int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max);
void iommu_sva_free_pasid(struct mm_struct *mm);
struct mm_struct *iommu_sva_find(ioasid_t pasid);
#endif /* _IOMMU_SVA_LIB_H */

include/linux/ioasid.h

@@ -34,7 +34,8 @@ struct ioasid_allocator_ops {
#if IS_ENABLED(CONFIG_IOASID)
ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
void *private);
- void ioasid_free(ioasid_t ioasid);
+ void ioasid_get(ioasid_t ioasid);
+ bool ioasid_put(ioasid_t ioasid);
void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
bool (*getter)(void *));
int ioasid_register_allocator(struct ioasid_allocator_ops *allocator);
@@ -48,10 +49,15 @@ static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min,
return INVALID_IOASID;
}
- static inline void ioasid_free(ioasid_t ioasid)
+ static inline void ioasid_get(ioasid_t ioasid)
{
}
+ static inline bool ioasid_put(ioasid_t ioasid)
+ {
+ return false;
+ }
static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
bool (*getter)(void *))
{