media: staging: imgu: Drop support for other page sizes
The hardware only supports 4 KiB pages; drop support for other sizes.

Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Tested-by: Rajmohan Mani <rajmohan.mani@intel.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
parent 3efcbe3e42
commit 17f61abb9b
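Since the IPU3 MMU only ever deals in 4 KiB pages, the patch replaces the generic per-iteration page-size selection with a fixed stride. A minimal sketch of the loop shape the patch converges on; this is a simplification for illustration, not the driver function itself, and map_one() is a hypothetical stand-in for __imgu_mmu_map():

#define IPU3_PAGE_SHIFT 12
#define IPU3_PAGE_SIZE  (1UL << IPU3_PAGE_SHIFT)

/* Simplified sketch: with a single supported page size there is no
 * per-iteration size calculation; every step advances by 4 KiB. */
static int example_map_range(unsigned long iova, unsigned long paddr,
                             unsigned long size,
                             int (*map_one)(unsigned long iova,
                                            unsigned long paddr))
{
        while (size) {
                int ret = map_one(iova, paddr); /* one page table entry */

                if (ret)
                        return ret;
                iova += IPU3_PAGE_SIZE;
                paddr += IPU3_PAGE_SIZE;
                size -= IPU3_PAGE_SIZE;
        }
        return 0;
}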
drivers/staging/media/ipu3/ipu3-dmamap.c

@@ -31,12 +31,11 @@ static void imgu_dmamap_free_buffer(struct page **pages,
  * Based on the implementation of __iommu_dma_alloc_pages()
  * defined in drivers/iommu/dma-iommu.c
  */
-static struct page **imgu_dmamap_alloc_buffer(size_t size,
-                                              unsigned long order_mask,
-                                              gfp_t gfp)
+static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
 {
         struct page **pages;
         unsigned int i = 0, count = size >> PAGE_SHIFT;
+        unsigned int order_mask = 1;
         const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;
 
         /* Allocate mem for array of page ptrs */
@@ -45,10 +44,6 @@ static struct page **imgu_dmamap_alloc_buffer(size_t size,
         if (!pages)
                 return NULL;
 
-        order_mask &= (2U << MAX_ORDER) - 1;
-        if (!order_mask)
-                return NULL;
-
         gfp |= __GFP_HIGHMEM | __GFP_ZERO;
 
         while (count) {
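With order_mask hard-coded to 1, only order 0 (a single 4 KiB page) can ever be chosen by the order-selection logic this allocator borrows from __iommu_dma_alloc_pages(), which is why the MAX_ORDER clamp and the emptiness check above are dropped as dead code. A userspace sketch of that arithmetic, with highest_order() standing in for __fls():

#include <assert.h>

static unsigned int highest_order(unsigned long mask)
{
        unsigned int order = 0;

        while (mask >>= 1)      /* index of the most significant set bit */
                order++;
        return order;
}

int main(void)
{
        unsigned long order_mask = 1;   /* as set by the patch */
        unsigned long count = 32;       /* pages still to allocate */

        /* restrict to orders that fit in 'count'; bit 0 always survives */
        unsigned long fit = order_mask & ((2UL << highest_order(count)) - 1);

        assert(highest_order(fit) == 0);        /* only order-0 allocations */
        return 0;
}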
@@ -99,7 +94,6 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
                         size_t len)
 {
         unsigned long shift = iova_shift(&imgu->iova_domain);
-        unsigned int alloc_sizes = imgu->mmu->pgsize_bitmap;
         struct device *dev = &imgu->pci_dev->dev;
         size_t size = PAGE_ALIGN(len);
         struct page **pages;
@@ -114,8 +108,7 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
         if (!iova)
                 return NULL;
 
-        pages = imgu_dmamap_alloc_buffer(size, alloc_sizes >> PAGE_SHIFT,
-                                         GFP_KERNEL);
+        pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL);
         if (!pages)
                 goto out_free_iova;
 
@@ -257,7 +250,7 @@ int imgu_dmamap_init(struct imgu_device *imgu)
         if (ret)
                 return ret;
 
-        order = __ffs(imgu->mmu->pgsize_bitmap);
+        order = __ffs(IPU3_PAGE_SIZE);
         base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
         init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);
 
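Deriving the IOVA granule from IPU3_PAGE_SIZE instead of the old pgsize_bitmap changes nothing numerically: the old bitmap only had the 4 KiB bit set, and __ffs() of a power of two is its shift, so the domain keeps a 4 KiB granule. A quick userspace check of that arithmetic, with ffs_bit() standing in for __ffs():

#include <assert.h>

#define IPU3_PAGE_SHIFT 12
#define IPU3_PAGE_SIZE  (1UL << IPU3_PAGE_SHIFT)

static unsigned int ffs_bit(unsigned long v)    /* index of lowest set bit */
{
        unsigned int i = 0;

        while (!(v & 1UL)) {
                v >>= 1;
                i++;
        }
        return i;
}

int main(void)
{
        unsigned int order = ffs_bit(IPU3_PAGE_SIZE);

        assert(order == IPU3_PAGE_SHIFT);       /* granule: 1UL << 12 = 4 KiB */
        return 0;
}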
drivers/staging/media/ipu3/ipu3-mmu.c

@@ -20,9 +20,6 @@
 
 #include "ipu3-mmu.h"
 
-#define IPU3_PAGE_SHIFT         12
-#define IPU3_PAGE_SIZE          (1UL << IPU3_PAGE_SHIFT)
-
 #define IPU3_PT_BITS            10
 #define IPU3_PT_PTES            (1UL << IPU3_PT_BITS)
 #define IPU3_PT_SIZE            (IPU3_PT_PTES << 2)
@@ -238,43 +235,6 @@ static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
         return 0;
 }
 
-/*
- * The following four functions are implemented based on iommu.c
- * drivers/iommu/iommu.c/iommu_pgsize().
- */
-static size_t imgu_mmu_pgsize(unsigned long pgsize_bitmap,
-                              unsigned long addr_merge, size_t size)
-{
-        unsigned int pgsize_idx;
-        size_t pgsize;
-
-        /* Max page size that still fits into 'size' */
-        pgsize_idx = __fls(size);
-
-        /* need to consider alignment requirements ? */
-        if (likely(addr_merge)) {
-                /* Max page size allowed by address */
-                unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-                pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-        }
-
-        /* build a mask of acceptable page sizes */
-        pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-        /* throw away page sizes not supported by the hardware */
-        pgsize &= pgsize_bitmap;
-
-        /* make sure we're still sane */
-        WARN_ON(!pgsize);
-
-        /* pick the biggest page */
-        pgsize_idx = __fls(pgsize);
-        pgsize = 1UL << pgsize_idx;
-
-        return pgsize;
-}
-
 /**
  * imgu_mmu_map - map a buffer to a physical address
  *
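The removed helper was only ever called with a pgsize_bitmap that had the single 4 KiB bit set (mmu->geometry.pgsize_bitmap = IPU3_PAGE_SIZE, dropped further down), so for any properly aligned request it could only return IPU3_PAGE_SIZE. A userspace re-creation of its arithmetic showing that, with fls_bit() standing in for the kernel's __fls()/__ffs():

#include <assert.h>
#include <stddef.h>

#define IPU3_PAGE_SHIFT 12
#define IPU3_PAGE_SIZE  (1UL << IPU3_PAGE_SHIFT)

static unsigned int fls_bit(unsigned long v)    /* index of highest set bit */
{
        unsigned int i = 0;

        while (v >>= 1)
                i++;
        return i;
}

static size_t old_pgsize(unsigned long bitmap, unsigned long addr_merge,
                         size_t size)
{
        unsigned int idx = fls_bit(size);       /* max order fitting 'size' */

        if (addr_merge) {
                /* max order allowed by the address alignment (__ffs) */
                unsigned int align_idx = fls_bit(addr_merge & -addr_merge);

                if (align_idx < idx)
                        idx = align_idx;
        }
        /* acceptable sizes, masked by the hardware bitmap; pick the biggest */
        return 1UL << fls_bit(((1UL << (idx + 1)) - 1) & bitmap);
}

int main(void)
{
        /* page-aligned iova/paddr/size: the only answer is one 4 KiB page */
        assert(old_pgsize(IPU3_PAGE_SIZE, 0x10000 | 0x4000, 0x10000) ==
               IPU3_PAGE_SIZE);
        return 0;
}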
@@ -290,20 +250,16 @@ int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
                  phys_addr_t paddr, size_t size)
 {
         struct imgu_mmu *mmu = to_imgu_mmu(info);
-        unsigned int min_pagesz;
         int ret = 0;
 
-        /* find out the minimum page size supported */
-        min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
-
         /*
          * both the virtual address and the physical one, as well as
          * the size of the mapping, must be aligned (at least) to the
          * size of the smallest page supported by the hardware
          */
-        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-                dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
-                        iova, &paddr, size, min_pagesz);
+        if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
+                dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
+                        iova, &paddr, size);
                 return -EINVAL;
         }
 
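One detail worth noting in the check above: OR-ing iova, paddr and size before a single IS_ALIGNED() is equivalent to testing each value separately, because any low bit set in any operand survives the OR. A small userspace illustration; the local IS_ALIGNED mirrors the semantics of the kernel macro:

#include <assert.h>

#define IPU3_PAGE_SIZE  4096UL
#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

int main(void)
{
        unsigned long iova = 0x10000, paddr = 0x7f000, size = 0x2000;

        assert(IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE));

        size = 0x2100;          /* no longer a multiple of 4 KiB */
        assert(!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE));
        return 0;
}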
@@ -311,19 +267,15 @@ int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
                 iova, &paddr, size);
 
         while (size) {
-                size_t pgsize = imgu_mmu_pgsize(mmu->geometry.pgsize_bitmap,
-                                                iova | paddr, size);
-
-                dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
-                        iova, &paddr, pgsize);
+                dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);
 
                 ret = __imgu_mmu_map(mmu, iova, paddr);
                 if (ret)
                         break;
 
-                iova += pgsize;
-                paddr += pgsize;
-                size -= pgsize;
+                iova += IPU3_PAGE_SIZE;
+                paddr += IPU3_PAGE_SIZE;
+                size -= IPU3_PAGE_SIZE;
         }
 
         call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
@@ -348,21 +300,19 @@ size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
         struct imgu_mmu *mmu = to_imgu_mmu(info);
         struct scatterlist *s;
         size_t s_length, mapped = 0;
-        unsigned int i, min_pagesz;
+        unsigned int i;
         int ret;
 
-        min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
-
         for_each_sg(sg, s, nents, i) {
                 phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
 
                 s_length = s->length;
 
-                if (!IS_ALIGNED(s->offset, min_pagesz))
+                if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
                         goto out_err;
 
-                /* must be min_pagesz aligned to be mapped singlely */
-                if (i == nents - 1 && !IS_ALIGNED(s->length, min_pagesz))
+                /* must be IPU3_PAGE_SIZE aligned to be mapped singlely */
+                if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
                         s_length = PAGE_ALIGN(s->length);
 
                 ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
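The scatterlist path keeps the rule it had before, just spelled with the constant: every entry must start on a 4 KiB boundary, and only the last entry may have a ragged length, which is rounded up before mapping. A userspace sketch of that rounding with hypothetical segment lengths; it assumes a 4 KiB CPU page size, as on the x86 systems that carry an IPU3, so PAGE_ALIGN and IPU3_PAGE_SIZE agree:

#include <assert.h>

#define IPU3_PAGE_SIZE  4096UL
#define PAGE_ALIGN(x)   (((x) + IPU3_PAGE_SIZE - 1) & ~(IPU3_PAGE_SIZE - 1))

int main(void)
{
        unsigned long lengths[] = { 8192, 4096, 3000 }; /* last one is ragged */
        unsigned long mapped = 0;
        unsigned int i, nents = 3;

        for (i = 0; i < nents; i++) {
                unsigned long len = lengths[i];

                /* only the final entry may be rounded up to a whole page */
                if (i == nents - 1)
                        len = PAGE_ALIGN(len);  /* 3000 -> 4096 */
                mapped += len;
        }
        assert(mapped == 4 * IPU3_PAGE_SIZE);   /* 16 KiB of IOVA space used */
        return 0;
}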
@@ -429,19 +379,15 @@ size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
 {
         struct imgu_mmu *mmu = to_imgu_mmu(info);
         size_t unmapped_page, unmapped = 0;
-        unsigned int min_pagesz;
-
-        /* find out the minimum page size supported */
-        min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
 
         /*
          * The virtual address, as well as the size of the mapping, must be
          * aligned (at least) to the size of the smallest page supported
          * by the hardware
          */
-        if (!IS_ALIGNED(iova | size, min_pagesz)) {
-                dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
-                        iova, size, min_pagesz);
+        if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
+                dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
+                        iova, size);
                 return -EINVAL;
         }
 
@@ -452,10 +398,7 @@ size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
          * or we hit an area that isn't mapped.
          */
         while (unmapped < size) {
-                size_t pgsize = imgu_mmu_pgsize(mmu->geometry.pgsize_bitmap,
-                                                iova, size - unmapped);
-
-                unmapped_page = __imgu_mmu_unmap(mmu, iova, pgsize);
+                unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
                 if (!unmapped_page)
                         break;
 
@@ -535,7 +478,6 @@ struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
 
         mmu->geometry.aperture_start = 0;
         mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);
-        mmu->geometry.pgsize_bitmap = IPU3_PAGE_SIZE;
 
         return &mmu->geometry;
 
drivers/staging/media/ipu3/ipu3-mmu.h

@@ -5,17 +5,18 @@
 #ifndef __IPU3_MMU_H
 #define __IPU3_MMU_H
 
+#define IPU3_PAGE_SHIFT         12
+#define IPU3_PAGE_SIZE          (1UL << IPU3_PAGE_SHIFT)
+
 /**
  * struct imgu_mmu_info - Describes mmu geometry
  *
  * @aperture_start: First address that can be mapped
  * @aperture_end: Last address that can be mapped
- * @pgsize_bitmap: Bitmap of page sizes in use
  */
 struct imgu_mmu_info {
         dma_addr_t aperture_start;
         dma_addr_t aperture_end;
-        unsigned long pgsize_bitmap;
 };
 
 struct device;
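With the two macros now living in ipu3-mmu.h, the rest of the imgu driver can size and align buffers against the one page size the hardware supports. A hypothetical helper, shown only as a usage sketch and not part of this patch:

#include <linux/types.h>

#include "ipu3-mmu.h"

/* Hypothetical helper: round a payload up to whole 4 KiB IPU3 pages. */
static inline size_t imgu_example_css_buffer_size(size_t payload)
{
        return (payload + IPU3_PAGE_SIZE - 1) & ~(IPU3_PAGE_SIZE - 1);
}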