mirror of
https://github.com/torvalds/linux.git
synced 2024-12-18 17:12:55 +00:00
15b244a88e
We are adding support for DMA memory pre-registration to be used in conjunction with VFIO. The idea is that the userspace which is going to run a guest may want to pre-register a user space memory region so it all gets pinned once and never goes away. Having this done, a hypervisor will not have to pin/unpin pages on every DMA map/unmap request. This is going to help with multiple pinning of the same memory. Another use of it is in-kernel real mode (mmu off) acceleration of DMA requests where real time translation of guest physical to host physical addresses is non-trivial and may fail as linux ptes may be temporarily invalid. Also, having cached host physical addresses (compared to just pinning at the start and then walking the page table again on every H_PUT_TCE), we can be sure that the addresses which we put into TCE table are the ones we already pinned. This adds a list of memory regions to mm_context_t. Each region consists of a header and a list of physical addresses. This adds API to: 1. register/unregister memory regions; 2. do final cleanup (which puts all pre-registered pages); 3. do userspace to physical address translation; 4. manage usage counters; multiple registration of the same memory is allowed (once per container). This implements 2 counters per registered memory region: - @mapped: incremented on every DMA mapping; decremented on unmapping; initialized to 1 when a region is just registered; once it becomes zero, no more mappings are allowed; - @used: incremented on every "register" ioctl; decremented on "unregister"; unregistration is allowed for DMA mapped regions unless it is the very last reference. For the very last reference this checks that the region is still mapped and returns -EBUSY so the userspace gets to know that memory is still pinned and unregistration needs to be retried; @used remains 1. Host physical addresses are stored in vmalloc'ed array. In order to access these in the real mode (mmu off), there is a real_vmalloc_addr() helper.
In-kernel acceleration patchset will move it from KVM to MMU code. Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru> Reviewed-by: David Gibson <david@gibson.dropbear.id.au> Reviewed-by: David Gibson <david@gibson.dropbear.id.au> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
132 lines
3.9 KiB
C
132 lines
3.9 KiB
C
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
|
|
#define __ASM_POWERPC_MMU_CONTEXT_H
|
|
#ifdef __KERNEL__
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/spinlock.h>
|
|
#include <asm/mmu.h>
|
|
#include <asm/cputable.h>
|
|
#include <asm-generic/mm_hooks.h>
|
|
#include <asm/cputhreads.h>
|
|
|
|
/*
|
|
 * Most of the context management is out of line
|
|
*/
|
|
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
|
|
extern void destroy_context(struct mm_struct *mm);
|
|
#ifdef CONFIG_SPAPR_TCE_IOMMU
|
|
struct mm_iommu_table_group_mem_t;
|
|
|
|
extern bool mm_iommu_preregistered(void);
|
|
extern long mm_iommu_get(unsigned long ua, unsigned long entries,
|
|
struct mm_iommu_table_group_mem_t **pmem);
|
|
extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
|
|
extern void mm_iommu_init(mm_context_t *ctx);
|
|
extern void mm_iommu_cleanup(mm_context_t *ctx);
|
|
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
|
|
unsigned long size);
|
|
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
|
|
unsigned long entries);
|
|
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
|
|
unsigned long ua, unsigned long *hpa);
|
|
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
|
|
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
|
|
#endif
|
|
|
|
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
|
|
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
|
|
extern void set_context(unsigned long id, pgd_t *pgd);
|
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
extern int __init_new_context(void);
|
|
extern void __destroy_context(int context_id);
|
|
static inline void mmu_context_init(void) { }
|
|
#else
|
|
extern unsigned long __init_new_context(void);
|
|
extern void __destroy_context(unsigned long context_id);
|
|
extern void mmu_context_init(void);
|
|
#endif
|
|
|
|
extern void switch_cop(struct mm_struct *next);
|
|
extern int use_cop(unsigned long acop, struct mm_struct *mm);
|
|
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
|
|
|
|
/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched/core.c.
 *
 * Switches the MMU context from @prev to @next on the current CPU on
 * behalf of task @tsk.  Called with interrupts disabled by the scheduler.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/*
	 * Mark this context as having been used on the new CPU.  Done
	 * unconditionally (even when prev == next) so the cpumask stays
	 * accurate for TLB-flush targeting.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif
	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/*
	 * We must stop all altivec streams before changing the HW
	 * context ("dssall" = data stream stop all).
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/*
	 * The actual HW switching method differs between the various
	 * sub architectures: hash-MMU 64-bit switches the SLB, everything
	 * else goes through the out-of-line switch_mmu_context().
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	switch_slb(tsk, next);
#else
	/* Out of line for now */
	switch_mmu_context(prev, next);
#endif
}
|
|
|
|
/* powerpc has no work to do when an mm is deactivated */
#define deactivate_mm(tsk,mm)			do { } while (0)
|
|
|
|
/*
|
|
* After we have set current->mm to a new value, this activates
|
|
* the context for the new mm so we see the new mappings.
|
|
*/
|
|
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
|
|
{
|
|
unsigned long flags;
|
|
|
|
local_irq_save(flags);
|
|
switch_mm(prev, next, current);
|
|
local_irq_restore(flags);
|
|
}
|
|
|
|
/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/*
	 * 64-bit Book3E keeps track of current PGD in the PACA.
	 * NOTE(review): clearing it here presumably prevents a lazy-TLB
	 * (kernel) thread from running with a stale user PGD cached in
	 * the PACA -- confirm against switch_mm()'s PACA update.
	 */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
|