tee: replace tee_shm_register()
tee_shm_register() is replaced by the previously introduced functions
tee_shm_register_user_buf() and tee_shm_register_kernel_buf(). Since there
are no external callers left, we can remove tee_shm_register() and refactor
what remains.

Reviewed-by: Sumit Garg <sumit.garg@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
parent 231b1fc5da
commit 53e16519c2
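For in-kernel users of the old API the conversion is mechanical. A minimal
sketch of the before/after (the surrounding function and variable names are
hypothetical; only the tee_shm_*() calls are the real TEE API):

	#include <linux/err.h>
	#include <linux/tee_drv.h>

	/* Hypothetical driver helper sharing a kernel buffer with the TEE. */
	static int share_kernel_buf(struct tee_context *ctx, void *buf, size_t len)
	{
		struct tee_shm *shm;

		/*
		 * Before: shm = tee_shm_register(ctx, (unsigned long)buf, len,
		 *                 TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
		 * After:
		 */
		shm = tee_shm_register_kernel_buf(ctx, buf, len);
		if (IS_ERR(shm))
			return PTR_ERR(shm);

		/* ... use shm, then release the registration ... */
		tee_shm_free(shm);
		return 0;
	}

Userspace buffers continue to go through tee_shm_register_user_buf(), which
now reserves an IDR slot up front and only publishes the shm with
idr_replace() once register_shm_helper() has succeeded.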
@@ -12,17 +12,43 @@
 #include <linux/uio.h>
 #include "tee_private.h"
 
+static void shm_put_kernel_pages(struct page **pages, size_t page_count)
+{
+	size_t n;
+
+	for (n = 0; n < page_count; n++)
+		put_page(pages[n]);
+}
+
+static int shm_get_kernel_pages(unsigned long start, size_t page_count,
+				struct page **pages)
+{
+	struct kvec *kiov;
+	size_t n;
+	int rc;
+
+	kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
+	if (!kiov)
+		return -ENOMEM;
+
+	for (n = 0; n < page_count; n++) {
+		kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
+		kiov[n].iov_len = PAGE_SIZE;
+	}
+
+	rc = get_kernel_pages(kiov, page_count, 0, pages);
+	kfree(kiov);
+
+	return rc;
+}
+
 static void release_registered_pages(struct tee_shm *shm)
 {
 	if (shm->pages) {
-		if (shm->flags & TEE_SHM_USER_MAPPED) {
+		if (shm->flags & TEE_SHM_USER_MAPPED)
 			unpin_user_pages(shm->pages, shm->num_pages);
-		} else {
-			size_t n;
-
-			for (n = 0; n < shm->num_pages; n++)
-				put_page(shm->pages[n]);
-		}
+		else
+			shm_put_kernel_pages(shm->pages, shm->num_pages);
 
 		kfree(shm->pages);
 	}
@@ -189,28 +215,24 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
 }
 EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
 
-struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
-				 size_t length, u32 flags)
+static struct tee_shm *
+register_shm_helper(struct tee_context *ctx, unsigned long addr,
+		    size_t length, u32 flags, int id)
 {
 	struct tee_device *teedev = ctx->teedev;
-	const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
-	const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
 	struct tee_shm *shm;
+	unsigned long start;
+	size_t num_pages;
 	void *ret;
 	int rc;
-	int num_pages;
-	unsigned long start;
-
-	if (flags != req_user_flags && flags != req_kernel_flags)
-		return ERR_PTR(-ENOTSUPP);
 
 	if (!tee_device_get(teedev))
 		return ERR_PTR(-EINVAL);
 
 	if (!teedev->desc->ops->shm_register ||
 	    !teedev->desc->ops->shm_unregister) {
-		tee_device_put(teedev);
-		return ERR_PTR(-ENOTSUPP);
+		ret = ERR_PTR(-ENOTSUPP);
+		goto err_dev_put;
 	}
 
 	teedev_ctx_get(ctx);
@@ -218,13 +240,13 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
 	if (!shm) {
 		ret = ERR_PTR(-ENOMEM);
-		goto err;
+		goto err_ctx_put;
 	}
 
 	refcount_set(&shm->refcount, 1);
-	shm->flags = flags | TEE_SHM_REGISTER;
+	shm->flags = flags;
 	shm->ctx = ctx;
-	shm->id = -1;
+	shm->id = id;
 	addr = untagged_addr(addr);
 	start = rounddown(addr, PAGE_SIZE);
 	shm->offset = addr - start;
@@ -233,71 +255,45 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
 	if (!shm->pages) {
 		ret = ERR_PTR(-ENOMEM);
-		goto err;
+		goto err_free_shm;
 	}
 
-	if (flags & TEE_SHM_USER_MAPPED) {
+	if (flags & TEE_SHM_USER_MAPPED)
 		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
 					 shm->pages);
-	} else {
-		struct kvec *kiov;
-		int i;
-
-		kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
-		if (!kiov) {
-			ret = ERR_PTR(-ENOMEM);
-			goto err;
-		}
-
-		for (i = 0; i < num_pages; i++) {
-			kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
-			kiov[i].iov_len = PAGE_SIZE;
-		}
-
-		rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
-		kfree(kiov);
-	}
+	else
+		rc = shm_get_kernel_pages(start, num_pages, shm->pages);
 	if (rc > 0)
 		shm->num_pages = rc;
 	if (rc != num_pages) {
 		if (rc >= 0)
 			rc = -ENOMEM;
 		ret = ERR_PTR(rc);
-		goto err;
+		goto err_put_shm_pages;
 	}
 
-	mutex_lock(&teedev->mutex);
-	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
-	mutex_unlock(&teedev->mutex);
-
-	if (shm->id < 0) {
-		ret = ERR_PTR(shm->id);
-		goto err;
-	}
-
 	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
 					     shm->num_pages, start);
 	if (rc) {
 		ret = ERR_PTR(rc);
-		goto err;
+		goto err_put_shm_pages;
 	}
 
 	return shm;
-err:
-	if (shm) {
-		if (shm->id >= 0) {
-			mutex_lock(&teedev->mutex);
-			idr_remove(&teedev->idr, shm->id);
-			mutex_unlock(&teedev->mutex);
-		}
-		release_registered_pages(shm);
-	}
+err_put_shm_pages:
+	if (flags & TEE_SHM_USER_MAPPED)
+		unpin_user_pages(shm->pages, shm->num_pages);
+	else
+		shm_put_kernel_pages(shm->pages, shm->num_pages);
+	kfree(shm->pages);
+err_free_shm:
 	kfree(shm);
+err_ctx_put:
 	teedev_ctx_put(ctx);
+err_dev_put:
 	tee_device_put(teedev);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(tee_shm_register);
 
 /**
  * tee_shm_register_user_buf() - Register a userspace shared memory buffer
@@ -310,8 +306,35 @@ EXPORT_SYMBOL_GPL(tee_shm_register);
 struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
 					  unsigned long addr, size_t length)
 {
-	return tee_shm_register(ctx, addr, length,
-				TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
+	u32 flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED | TEE_SHM_REGISTER;
+	struct tee_device *teedev = ctx->teedev;
+	struct tee_shm *shm;
+	void *ret;
+	int id;
+
+	mutex_lock(&teedev->mutex);
+	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
+	mutex_unlock(&teedev->mutex);
+	if (id < 0)
+		return ERR_PTR(id);
+
+	shm = register_shm_helper(ctx, addr, length, flags, id);
+	if (IS_ERR(shm)) {
+		mutex_lock(&teedev->mutex);
+		idr_remove(&teedev->idr, id);
+		mutex_unlock(&teedev->mutex);
+		return shm;
+	}
+
+	mutex_lock(&teedev->mutex);
+	ret = idr_replace(&teedev->idr, shm, id);
+	mutex_unlock(&teedev->mutex);
+	if (IS_ERR(ret)) {
+		tee_shm_free(shm);
+		return ret;
+	}
+
+	return shm;
 }
 
 /**
@@ -327,8 +350,9 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
 struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
 					    void *addr, size_t length)
 {
-	return tee_shm_register(ctx, (unsigned long)addr, length,
-				TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
+	u32 flags = TEE_SHM_REGISTER | TEE_SHM_KERNEL_MAPPED;
+
+	return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1);
 }
 EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
 
@@ -276,17 +276,6 @@ void *tee_get_drvdata(struct tee_device *teedev);
 struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
 struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
 
-/**
- * tee_shm_register() - Register shared memory buffer
- * @ctx: Context that registers the shared memory
- * @addr: Address is userspace of the shared buffer
- * @length: Length of the shared buffer
- * @flags: Flags setting properties for the requested shared memory.
- *
- * @returns a pointer to 'struct tee_shm'
- */
-struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
-				 size_t length, u32 flags);
 struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
 					    void *addr, size_t length);
 