powerpc/mm/iommu, vfio/spapr: Put pages on VFIO container shutdown

At the moment the userspace tool is expected to request pinning of
the entire guest RAM when the VFIO IOMMU SPAPR v2 driver is used.
When the userspace process finishes, all the pinned pages need to
be put; this is done as part of the userspace memory context (MM)
destruction which happens on the very last mmdrop().

The problem with this approach is that the MM of the userspace process
may live longer than the userspace process itself, as kernel threads
borrow the MM of whichever userspace process was running on the CPU
where the kernel thread got scheduled. If this happens, the MM remains
referenced until that kernel thread wakes up again and releases the
very last reference to the MM; on an idle system this can take hours.
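
To make the lifetime issue concrete, here is a toy userspace model (not
kernel code; toy_mm, toy_mmdrop() and the counts below are invented for
this sketch) of how a lazily held mm_count reference defers the final
destroy_context():

/* Toy model: the "process" and a "kernel thread" each hold a reference;
 * pinned pages are only put when the very last reference goes away. */
#include <stdio.h>
#include <stdlib.h>

struct toy_mm {
	int mm_count;		/* models mm->mm_count */
	int pinned_pages;	/* models pages pinned for the IOMMU */
};

static void toy_mmdrop(struct toy_mm *mm)
{
	if (--mm->mm_count == 0) {
		/* before this patch, this was the only place where the
		 * preregistered pages were put */
		printf("destroy_context(): putting %d pages\n",
		       mm->pinned_pages);
		free(mm);
	}
}

int main(void)
{
	struct toy_mm *mm = calloc(1, sizeof(*mm));

	if (!mm)
		return 1;

	mm->mm_count = 1;		/* the userspace process itself */
	mm->pinned_pages = 262144;	/* "guest RAM" pinned via VFIO */

	mm->mm_count++;			/* lazy reference from a kthread */

	toy_mmdrop(mm);		/* process exits: pages stay pinned */
	toy_mmdrop(mm);		/* kthread reschedules, maybe hours later */
	return 0;
}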

This moves the tracking of preregistered regions from the MM to VFIO:
instead of using mm_iommu_table_group_mem_t::used, a
tce_container::prereg_list is added so that each container releases the
regions which it has preregistered.

This changes the userspace interface to return EBUSY if a memory
region is already registered in a container. However, this should not
have any practical effect as the only userspace tool available now
registers a memory region only once per container anyway.
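
For illustration, a sketch of the affected ioctl from userspace (this
assumes a container fd already set up with the VFIO_SPAPR_TCE_v2_IOMMU
backend and a page-aligned buffer; the fd and buffer are hypothetical and
error handling is trimmed):

#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int register_region(int container_fd, void *vaddr, size_t size)
{
	struct vfio_iommu_spapr_register_memory reg = {
		.argsz = sizeof(reg),
		.flags = 0,
		.vaddr = (unsigned long)vaddr,	/* must be page aligned */
		.size  = size,			/* must be a page multiple */
	};

	return ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
}

/* Usage (hypothetical fd/buf):
 *	register_region(fd, buf, len);	-> 0
 *	register_region(fd, buf, len);	-> -1, errno == EBUSY with this patch
 */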

As tce_iommu_register_pages/tce_iommu_unregister_pages are called
under container->lock, this does not need additional locking.
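
For context, the callers are expected to look roughly like the fragment
below (a sketch only, not part of this diff; the copy_from_user() and
argsz/flags validation are elided):

	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		/* ... copy_from_user() and argsz/flags checks elided ... */

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}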

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Author:    Alexey Kardashevskiy, 2016-11-30 17:52:05 +11:00
Committed: Michael Ellerman
Commit:    4b6fad7097 (parent bc82d122ae)

3 changed files with 61 additions and 15 deletions

@@ -156,13 +156,11 @@ static inline void destroy_pagetable_page(struct mm_struct *mm)
 }
 #endif
 
-
 void destroy_context(struct mm_struct *mm)
 {
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_cleanup(mm);
+	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
 #endif
-
 #ifdef CONFIG_PPC_ICSWX
 	drop_cop(mm->context.acop, mm);
 	kfree(mm->context.cop_lockp);

@@ -365,14 +365,3 @@ void mm_iommu_init(struct mm_struct *mm)
 {
 	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
 }
-
-void mm_iommu_cleanup(struct mm_struct *mm)
-{
-	struct mm_iommu_table_group_mem_t *mem, *tmp;
-
-	list_for_each_entry_safe(mem, tmp, &mm->context.iommu_group_mem_list,
-			next) {
-		list_del_rcu(&mem->next);
-		mm_iommu_do_free(mem);
-	}
-}

@@ -88,6 +88,15 @@ struct tce_iommu_group {
 	struct iommu_group *grp;
 };
 
+/*
+ * A container needs to remember which preregistered region it has
+ * referenced to do proper cleanup at the userspace process exit.
+ */
+struct tce_iommu_prereg {
+	struct list_head next;
+	struct mm_iommu_table_group_mem_t *mem;
+};
+
 /*
  * The container descriptor supports only a single group per container.
  * Required by the API as the container is not supplied with the IOMMU group
@@ -102,6 +111,7 @@ struct tce_container {
 	struct mm_struct *mm;
 	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
 	struct list_head group_list;
+	struct list_head prereg_list;
 };
 
 static long tce_iommu_mm_set(struct tce_container *container)
@@ -118,10 +128,27 @@ static long tce_iommu_mm_set(struct tce_container *container)
 	return 0;
 }
 
+static long tce_iommu_prereg_free(struct tce_container *container,
+		struct tce_iommu_prereg *tcemem)
+{
+	long ret;
+
+	ret = mm_iommu_put(container->mm, tcemem->mem);
+	if (ret)
+		return ret;
+
+	list_del(&tcemem->next);
+	kfree(tcemem);
+
+	return 0;
+}
+
 static long tce_iommu_unregister_pages(struct tce_container *container,
 		__u64 vaddr, __u64 size)
 {
 	struct mm_iommu_table_group_mem_t *mem;
+	struct tce_iommu_prereg *tcemem;
+	bool found = false;
 
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
 		return -EINVAL;
@@ -130,7 +157,17 @@ static long tce_iommu_unregister_pages(struct tce_container *container,
 	if (!mem)
 		return -ENOENT;
 
-	return mm_iommu_put(container->mm, mem);
+	list_for_each_entry(tcemem, &container->prereg_list, next) {
+		if (tcemem->mem == mem) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENOENT;
+
+	return tce_iommu_prereg_free(container, tcemem);
 }
 
 static long tce_iommu_register_pages(struct tce_container *container,
@@ -138,16 +175,29 @@ static long tce_iommu_register_pages(struct tce_container *container,
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem = NULL;
+	struct tce_iommu_prereg *tcemem;
 	unsigned long entries = size >> PAGE_SHIFT;
 
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
 			((vaddr + size) < vaddr))
 		return -EINVAL;
 
+	mem = mm_iommu_find(container->mm, vaddr, entries);
+	if (mem) {
+		list_for_each_entry(tcemem, &container->prereg_list, next) {
+			if (tcemem->mem == mem)
+				return -EBUSY;
+		}
+	}
+
 	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
 	if (ret)
 		return ret;
 
+	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+	tcemem->mem = mem;
+	list_add(&tcemem->next, &container->prereg_list);
+
 	container->enabled = true;
 
 	return 0;
@@ -334,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg)
 
 	mutex_init(&container->lock);
 	INIT_LIST_HEAD_RCU(&container->group_list);
+	INIT_LIST_HEAD_RCU(&container->prereg_list);
 
 	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
@@ -372,6 +423,14 @@ static void tce_iommu_release(void *iommu_data)
 		tce_iommu_free_table(container, tbl);
 	}
 
+	while (!list_empty(&container->prereg_list)) {
+		struct tce_iommu_prereg *tcemem;
+
+		tcemem = list_first_entry(&container->prereg_list,
+				struct tce_iommu_prereg, next);
+		WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
+	}
+
 	tce_iommu_disable(container);
 	if (container->mm)
 		mmdrop(container->mm);