drm/msm: Add a helper function for in-kernel buffer allocations

Nearly all of the buffer allocations for the kernel allocate a buffer object,
virtual address and GPU iova at the same time. Add a helper function to
handle the details.

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
[dropped msm_fbdev conversion to new helper, since it interferes with
display-handover work, where we want to separate allocation and mapping]
Signed-off-by: Rob Clark <robdclark@gmail.com>
6 changed files with 72 additions and 54 deletions
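Every converted call site below follows the same pattern: one
msm_gem_kernel_new() (or msm_gem_kernel_new_locked()) call replaces the old
msm_gem_new() + msm_gem_get_iova() + msm_gem_get_vaddr() sequence. A minimal
sketch of a caller against the new API; my_gpu_init, struct my_priv and its
fields are hypothetical names, not part of the patch:

static int my_gpu_init(struct msm_gpu *gpu, struct my_priv *priv)
{
	void *vaddr;

	/* One call allocates the BO, pins an iova in gpu->aspace and
	 * maps a kernel vaddr; failures are unwound inside the helper.
	 */
	vaddr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE, MSM_BO_UNCACHED,
			gpu->aspace, &priv->bo, &priv->iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	priv->vaddr = vaddr;
	return 0;
}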

drivers/gpu/drm/msm/adreno/a5xx_gpu.c

@@ -284,28 +284,14 @@ static int a5xx_me_init(struct msm_gpu *gpu)
 static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 		const struct firmware *fw, u64 *iova)
 {
-	struct drm_device *drm = gpu->dev;
 	struct drm_gem_object *bo;
 	void *ptr;
 
-	bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
-	if (IS_ERR(bo))
-		return bo;
+	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
 
-	ptr = msm_gem_get_vaddr(bo);
-	if (!ptr) {
-		drm_gem_object_unreference(bo);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (iova) {
-		int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
-
-		if (ret) {
-			drm_gem_object_unreference(bo);
-			return ERR_PTR(ret);
-		}
-	}
+	if (IS_ERR(ptr))
+		return ERR_CAST(ptr);
 
 	memcpy(ptr, &fw->data[4], fw->size - 4);

drivers/gpu/drm/msm/adreno/a5xx_power.c

@@ -294,16 +294,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	 */
 	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
 
-	a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
-	if (IS_ERR(a5xx_gpu->gpmu_bo))
-		goto err;
-
-	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
-			&a5xx_gpu->gpmu_iova))
-		goto err;
-
-	ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
-	if (!ptr)
+	ptr = msm_gem_kernel_new_locked(drm, bosize,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+	if (IS_ERR(ptr))
 		goto err;
 
 	while (cmds_size > 0) {

drivers/gpu/drm/msm/adreno/adreno_gpu.c

@@ -391,31 +391,19 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
-			MSM_BO_UNCACHED);
-	if (IS_ERR(adreno_gpu->memptrs_bo)) {
-		ret = PTR_ERR(adreno_gpu->memptrs_bo);
-		adreno_gpu->memptrs_bo = NULL;
-		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
-		return ret;
-	}
+	adreno_gpu->memptrs = msm_gem_kernel_new(drm,
+		sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
+		&adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
 
-	adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
 	if (IS_ERR(adreno_gpu->memptrs)) {
-		dev_err(drm->dev, "could not vmap memptrs\n");
-		return -ENOMEM;
+		ret = PTR_ERR(adreno_gpu->memptrs);
+		adreno_gpu->memptrs = NULL;
+		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
 	}
 
-	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
-			&adreno_gpu->memptrs_iova);
-	if (ret) {
-		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
+	return ret;
 }
 
 void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 {
 	struct msm_gpu *gpu = &adreno_gpu->base;

drivers/gpu/drm/msm/msm_drv.h

@@ -237,6 +237,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);

drivers/gpu/drm/msm/msm_gem.c

@@ -1024,3 +1024,49 @@ fail:
 	drm_gem_object_unreference_unlocked(obj);
 	return ERR_PTR(ret);
 }
+
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+	void *vaddr;
+	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+	int ret;
+
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	if (iova) {
+		ret = msm_gem_get_iova(obj, aspace, iova);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			return ERR_PTR(ret);
+		}
+	}
+
+	vaddr = msm_gem_get_vaddr(obj);
+	if (!vaddr) {
+		msm_gem_put_iova(obj, aspace);
+		drm_gem_object_unreference(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (bo)
+		*bo = obj;
+
+	return vaddr;
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova)
+{
+	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
+}
+
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova)
+{
+	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
+}
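The helper only handles construction; teardown stays with the caller. A
sketch of the matching cleanup, patterned on the put/unreference calls
these files already use (my_gpu_cleanup and struct my_priv are the same
hypothetical names as in the sketch above):

static void my_gpu_cleanup(struct msm_gpu *gpu, struct my_priv *priv)
{
	if (priv->bo) {
		/* Drop the kernel mapping, the iova pin, then the BO ref */
		msm_gem_put_vaddr(priv->bo);
		if (priv->iova)
			msm_gem_put_iova(priv->bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(priv->bo);
	}
}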

drivers/gpu/drm/msm/msm_ringbuffer.c

@@ -33,16 +33,14 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
 	}
 
 	ring->gpu = gpu;
-	ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
-	if (IS_ERR(ring->bo)) {
-		ret = PTR_ERR(ring->bo);
-		ring->bo = NULL;
-		goto fail;
-	}
 
-	ring->start = msm_gem_get_vaddr(ring->bo);
+	/* Pass NULL for the iova pointer - we will map it later */
+	ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
+		gpu->aspace, &ring->bo, NULL);
 	if (IS_ERR(ring->start)) {
 		ret = PTR_ERR(ring->start);
+		ring->start = 0;
 		goto fail;
 	}
+
 	ring->end   = ring->start + (size / 4);
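The "map it later" in the comment is the existing msm_gem_get_iova() call,
made once the GPU's address space is ready (e.g. at hw_init time). A
sketch, with ring_map_iova as a hypothetical name:

static int ring_map_iova(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint64_t *iova)
{
	return msm_gem_get_iova(ring->bo, gpu->aspace, iova);
}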