dma-direct: factor out dma_set_{de,en}crypted helpers
Factor out helpers that make dealing with memory encryption a little less cumbersome.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
commit 4d0564785b
parent 0fcfb00b28
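The payoff is on the caller side: each open-coded "check force_dma_unencrypted(), call set_memory_{de,en}crypted(), test the error" block collapses into a single early-exit check. Below is a self-contained user-space sketch of that pattern; the bodies of force_dma_unencrypted() and set_memory_decrypted() are stubs invented for illustration, and only the control flow mirrors the kernel code in the diff that follows.

/*
 * User-space sketch of the refactoring pattern in this commit.
 * force_dma_unencrypted() and set_memory_decrypted() are stubbed out;
 * only the control-flow shape matches the kernel code.
 */
#include <stddef.h>
#include <stdio.h>

struct device { int unencrypted; };

static int force_dma_unencrypted(struct device *dev)
{
	return dev->unencrypted;	/* stub; the real test is arch-specific */
}

static int set_memory_decrypted(unsigned long addr, int numpages)
{
	printf("decrypting %d page(s) at %#lx\n", numpages, addr);
	return 0;			/* stub; pretend it always succeeds */
}

/* The factored-out helper: a successful no-op unless decryption is forced. */
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, 1);	/* 1 page for the demo */
}

int main(void)
{
	struct device dev = { .unencrypted = 1 };
	char buf[64];

	/* Callers collapse to a single early-exit check, as in the diff below. */
	if (dma_set_decrypted(&dev, buf, sizeof(buf)))
		return 1;
	return 0;
}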
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 		size_t size)
 {
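Both helpers derive the page count from the byte size as 1 << get_order(size), i.e. the region is treated as a power-of-two number of pages, matching how the pages were originally allocated. Below is a minimal user-space sketch of that conversion with a hypothetical stand-in for the kernel's get_order(); it is illustration only, not the kernel implementation.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical model of get_order(): smallest n with (PAGE_SIZE << n) >= size. */
static int get_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 6000;

	/* 6000 bytes rounds up to order 1, i.e. 2 pages (8192 bytes). */
	printf("%lu bytes -> %lu page(s)\n", size, 1UL << get_order(size));
	return 0;
}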
@@ -154,7 +168,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 {
 	struct page *page;
 	void *ret;
-	int err;
 
 	size = PAGE_ALIGN(size);
 	if (attrs & DMA_ATTR_NO_WARN)
@@ -216,12 +229,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
-		if (force_dma_unencrypted(dev)) {
-			err = set_memory_decrypted((unsigned long)ret,
-						   1 << get_order(size));
-			if (err)
-				goto out_free_pages;
-		}
+		if (dma_set_decrypted(dev, ret, size))
+			goto out_free_pages;
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -238,13 +247,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
-		if (err)
-			goto out_free_pages;
-	}
-
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
@@ -259,13 +263,9 @@ done:
 	return ret;
 
 out_encrypt_pages:
-	if (force_dma_unencrypted(dev)) {
-		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
-		/* If memory cannot be re-encrypted, it must be leaked */
-		if (err)
-			return NULL;
-	}
+	/* If memory cannot be re-encrypted, it must be leaked */
+	if (dma_set_encrypted(dev, page_address(page), size))
+		return NULL;
 out_free_pages:
 	__dma_direct_free_pages(dev, page, size);
 	return NULL;
@@ -304,8 +304,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+	dma_set_encrypted(dev, cpu_addr, 1 << page_order);
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -341,11 +340,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-				1 << get_order(size)))
-			goto out_free_pages;
-	}
+	if (dma_set_decrypted(dev, ret, size))
+		goto out_free_pages;
 	memset(ret, 0, size);
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
@@ -366,9 +362,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, vaddr, size))
 		return;
 
-	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+	dma_set_encrypted(dev, vaddr, 1 << page_order);
 	__dma_direct_free_pages(dev, page, size);
 }