drm/vkms: Add functions to map/unmap GEM backing storage

This patch adds the necessary functions to map/unmap GEM
backing memory to the kernel's virtual address space.

Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/4b6563ae4f4337a5fd51f872424addf64e8d59a6.1532446182.git.hamohammed.sa@gmail.com
This commit is contained in:
Haneen Mohammed 2018-07-24 19:26:59 +03:00 committed by Sean Paul
parent 344d00795e
commit bb112b14af
2 changed files with 87 additions and 1 deletions

View File

@ -39,6 +39,8 @@ struct vkms_gem_object {
struct drm_gem_object gem;
struct mutex pages_lock; /* Page lock used in page fault handler */
struct page **pages;
unsigned int vmap_count;
void *vaddr;
};
#define drm_crtc_to_vkms_output(target) \
@ -47,6 +49,9 @@ struct vkms_gem_object {
#define drm_device_to_vkms_device(target) \
container_of(target, struct vkms_device, drm)
#define drm_gem_to_vkms_gem(target)\
container_of(target, struct vkms_gem_object, gem)
/* CRTC */
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor);
@ -75,4 +80,8 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
void vkms_gem_free_object(struct drm_gem_object *obj);
int vkms_gem_vmap(struct drm_gem_object *obj);
void vkms_gem_vunmap(struct drm_gem_object *obj);
#endif /* _VKMS_DRV_H_ */

View File

@ -37,7 +37,9 @@ void vkms_gem_free_object(struct drm_gem_object *obj)
struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
gem);
kvfree(gem->pages);
WARN_ON(gem->pages);
WARN_ON(gem->vaddr);
mutex_destroy(&gem->pages_lock);
drm_gem_object_release(obj);
kfree(gem);
@ -177,3 +179,78 @@ unref:
return ret;
}
/*
 * Return the object's backing pages, allocating them on first use.
 *
 * Lockless publication: if a concurrent caller races us and installs
 * its pages array first, the cmpxchg fails and we drop our duplicate
 * set via drm_gem_put_pages(), then return the winner's array.
 *
 * Returns the pages array on success or an ERR_PTR() from
 * drm_gem_get_pages() on failure.
 */
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
	struct drm_gem_object *gem_obj = &vkms_obj->gem;

	if (!vkms_obj->pages) {
		struct page **pages = drm_gem_get_pages(gem_obj);

		if (IS_ERR(pages))
			return pages;

		/* Only one racer publishes; the loser releases its copy. */
		if (cmpxchg(&vkms_obj->pages, NULL, pages))
			drm_gem_put_pages(gem_obj, pages, false, true);
	}

	return vkms_obj->pages;
}
/*
 * Drop one vmap reference on @obj; tear down the kernel mapping and
 * release the backing pages when the last reference goes away.
 *
 * Pairs with vkms_gem_vmap(). Serialized by pages_lock.
 */
void vkms_gem_vunmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_gem = drm_gem_to_vkms_gem(obj);

	mutex_lock(&vkms_gem->pages_lock);

	if (vkms_gem->vmap_count < 1) {
		/* Unbalanced vunmap: nothing should still be mapped. */
		WARN_ON(vkms_gem->vaddr);
		WARN_ON(vkms_gem->pages);
		goto out_unlock;
	}

	if (--vkms_gem->vmap_count == 0) {
		vunmap(vkms_gem->vaddr);
		vkms_gem->vaddr = NULL;
		drm_gem_put_pages(obj, vkms_gem->pages, false, true);
		vkms_gem->pages = NULL;
	}

out_unlock:
	mutex_unlock(&vkms_gem->pages_lock);
}
/*
 * Map @obj's backing pages into the kernel's virtual address space,
 * taking one vmap reference. Pairs with vkms_gem_vunmap().
 *
 * Bug fix vs. the original: vmap_count is now incremented on EVERY
 * successful call, not only on the call that creates the mapping.
 * Previously a second caller got vaddr "for free" without a reference,
 * so its paired vkms_gem_vunmap() dropped the count to zero and freed
 * the mapping while the first caller was still using it.
 *
 * Returns 0 on success, PTR_ERR() from page allocation, or -ENOMEM if
 * vmap() fails. Serialized by pages_lock.
 */
int vkms_gem_vmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	int ret = 0;

	mutex_lock(&vkms_obj->pages_lock);

	if (!vkms_obj->vaddr) {
		unsigned int n_pages = obj->size >> PAGE_SHIFT;
		struct page **pages = _get_pages(vkms_obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
		if (!vkms_obj->vaddr) {
			ret = -ENOMEM;
			drm_gem_put_pages(obj, vkms_obj->pages, false, true);
			vkms_obj->pages = NULL;
			goto out;
		}
	}

	/* Every successful map takes a reference, so vunmap pairs 1:1. */
	vkms_obj->vmap_count++;

out:
	mutex_unlock(&vkms_obj->pages_lock);
	return ret;
}