/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H

#include <linux/bitops.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
        ARM_32_LPAE_S1,
        ARM_32_LPAE_S2,
        ARM_64_LPAE_S1,
        ARM_64_LPAE_S2,
        ARM_V7S,
        IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
        void (*tlb_flush_all)(void *cookie);
        void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
                              bool leaf, void *cookie);
        void (*tlb_sync)(void *cookie);
};
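/*
 * Example: a minimal sketch of a driver supplying these callbacks. All
 * my_iommu_* names and registers are hypothetical; only the callback
 * signatures above are prescribed by this header.
 *
 *      static void my_iommu_tlb_flush_all(void *cookie)
 *      {
 *              struct my_iommu_domain *dom = cookie;
 *
 *              writel_relaxed(1, dom->base + MY_IOMMU_REG_TLBIALL);
 *      }
 *
 *      static const struct iommu_gather_ops my_iommu_gather_ops = {
 *              .tlb_flush_all  = my_iommu_tlb_flush_all,
 *              .tlb_add_flush  = my_iommu_tlb_add_flush,
 *              .tlb_sync       = my_iommu_tlb_sync,
 *      };
 */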
/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
        /*
         * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
         *      stage 1 PTEs, for hardware which insists on validating them
         *      even in non-secure state where they should normally be ignored.
         *
         * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
         *      IOMMU_NOEXEC flags and map everything with full access, for
         *      hardware which does not implement the permissions of a given
         *      format, and/or requires some format-specific default value.
         *
         * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
         *      (unmapped) entries but the hardware might do so anyway, perform
         *      TLB maintenance when mapping as well as when unmapping.
         *
         * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
         *      PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
         *      when the SoC is in "4GB mode" and they can only access the high
         *      remap of DRAM (0x1_00000000 to 0x1_ffffffff).
         *
         * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever
         *      be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
         *      software-emulated IOMMU), such that pagetable updates need not
         *      be treated as explicit DMA data.
         */
        #define IO_PGTABLE_QUIRK_ARM_NS         BIT(0)
        #define IO_PGTABLE_QUIRK_NO_PERMS       BIT(1)
        #define IO_PGTABLE_QUIRK_TLBI_ON_MAP    BIT(2)
        #define IO_PGTABLE_QUIRK_ARM_MTK_4GB    BIT(3)
        #define IO_PGTABLE_QUIRK_NO_DMA         BIT(4)
        unsigned long                   quirks;
        unsigned long                   pgsize_bitmap;
        unsigned int                    ias;
        unsigned int                    oas;
        const struct iommu_gather_ops   *tlb;
        struct device                   *iommu_dev;

        /* Low-level data specific to the table format */
        union {
                struct {
                        u64     ttbr[2];
                        u64     tcr;
                        u64     mair[2];
                } arm_lpae_s1_cfg;

                struct {
                        u64     vttbr;
                        u64     vtcr;
                } arm_lpae_s2_cfg;

                struct {
                        u32     ttbr[2];
                        u32     tcr;
                        u32     nmrr;
                        u32     prrr;
                } arm_v7s_cfg;
        };
};
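/*
 * Example: a sketch of a typical stage 1 configuration. The SZ_* page
 * sizes come from <linux/sizes.h>; my_iommu_gather_ops and smmu are
 * hypothetical driver state, and the format-specific union is left for
 * the allocator to fill in.
 *
 *      struct io_pgtable_cfg cfg = {
 *              .quirks         = IO_PGTABLE_QUIRK_NO_DMA,
 *              .pgsize_bitmap  = SZ_4K | SZ_2M | SZ_1G,
 *              .ias            = 48,
 *              .oas            = 48,
 *              .tlb            = &my_iommu_gather_ops,
 *              .iommu_dev      = smmu->dev,
 *      };
 */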
/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
        int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
                   phys_addr_t paddr, size_t size, int prot);
        size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
                        size_t size);
        phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
                                    unsigned long iova);
};
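/*
 * Example: a sketch of calling through the ops table. Note the return
 * conventions: map() returns 0 or a negative errno, while unmap()
 * returns the number of bytes unmapped, with 0 indicating failure.
 *
 *      ret = ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *      if (ret)
 *              return ret;
 *
 *      unmapped = ops->unmap(ops, iova, SZ_4K);
 *      if (unmapped != SZ_4K)
 *              return -EINVAL;
 */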
/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
                                            struct io_pgtable_cfg *cfg,
                                            void *cookie);
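/*
 * Example: a sketch of the usual allocation pattern (smmu_domain and
 * domain are hypothetical driver state). Because the allocator may
 * write back into cfg, drivers read the final pgsize_bitmap afterwards.
 *
 *      ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, smmu_domain);
 *      if (!ops)
 *              return -ENOMEM;
 *
 *      domain->pgsize_bitmap = cfg.pgsize_bitmap;
 */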
/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);
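/*
 * Example: a sketch of a teardown path. The my_iommu_* names are
 * hypothetical; the point is that devices are detached first, so the
 * tables are no longer live and no TLB flush is required before freeing.
 *
 *      static void my_iommu_domain_free(struct iommu_domain *domain)
 *      {
 *              struct my_iommu_domain *dom = to_my_iommu_domain(domain);
 *
 *              free_io_pgtable_ops(dom->pgtbl_ops);
 *              kfree(dom);
 *      }
 */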
/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
        enum io_pgtable_fmt     fmt;
        void                    *cookie;
        struct io_pgtable_cfg   cfg;
        struct io_pgtable_ops   ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
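/*
 * Example: a sketch of how a format implementation recovers its
 * io_pgtable (and hence cfg and cookie) from the ops pointer handed to
 * its entry points; my_fmt_do_unmap() is a hypothetical helper.
 *
 *      static size_t my_fmt_unmap(struct io_pgtable_ops *ops,
 *                                 unsigned long iova, size_t size)
 *      {
 *              struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);
 *
 *              return my_fmt_do_unmap(iop, iova, size);
 *      }
 */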
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
        iop->cfg.tlb->tlb_flush_all(iop->cookie);
}

static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
                unsigned long iova, size_t size, size_t granule, bool leaf)
{
        iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
}

static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
        iop->cfg.tlb->tlb_sync(iop->cookie);
}
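/*
 * Example: a sketch of an unmap path using the wrappers above instead
 * of dereferencing cfg.tlb by hand. A leaf invalidation is queued for
 * the removed range, then synced before the unmap returns.
 *
 *      io_pgtable_tlb_add_flush(iop, iova, size, granule, true);
 *      io_pgtable_tlb_sync(iop);
 */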
/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
        struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
        void (*free)(struct io_pgtable *iop);
};
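/*
 * Example: a sketch of the constructor/destructor pair a new format
 * would export, to be looked up by its io_pgtable_fmt value in
 * io-pgtable.c (the my_fmt_* names are hypothetical).
 *
 *      struct io_pgtable_init_fns io_pgtable_my_fmt_init_fns = {
 *              .alloc  = my_fmt_alloc_pgtable,
 *              .free   = my_fmt_free_pgtable,
 *      };
 */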
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;

#endif /* __IO_PGTABLE_H */