drm/exynos: merge exynos_drm_buf.c to exynos_drm_gem.c
The struct exynos_drm_gem_obj can hold the fields of the struct
exynos_drm_gem_buf directly, so the exynos_drm_buf.c file is no longer
needed.

Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
parent 01ed50ddbd
commit 2a8cb48945
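
For orientation before the diff: a minimal before/after sketch of the
data-structure change (illustration only, not part of the commit; the
*_old/*_new names are hypothetical, and kernel headers such as
<drm/drmP.h> are assumed for struct drm_gem_object, dma_addr_t,
struct dma_attrs and struct page):

	/* Before: DMA state lived in a second, separately kzalloc()ed object. */
	struct exynos_drm_gem_buf_old {
		void		*cookie;
		void __iomem	*kvaddr;
		dma_addr_t	dma_addr;
		struct dma_attrs dma_attrs;
		struct page	**pages;
		unsigned long	size;
	};

	struct exynos_drm_gem_obj_old {
		struct drm_gem_object		base;
		struct exynos_drm_gem_buf_old	*buffer; /* extra allocation */
		unsigned long			size;
		unsigned int			flags;
	};

	/*
	 * After: the same fields are embedded in the GEM object itself, so
	 * every obj->buffer->field access becomes obj->field and the
	 * separate init/fini helpers in exynos_drm_buf.c can go away.
	 */
	struct exynos_drm_gem_obj_new {
		struct drm_gem_object	base;
		unsigned int		flags;
		unsigned long		size;
		void			*cookie;
		void __iomem		*kvaddr;
		dma_addr_t		dma_addr;
		struct dma_attrs	dma_attrs;
		struct page		**pages;
	};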
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
@@ -4,8 +4,8 @@
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
 
 exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
-		exynos_drm_fb.o exynos_drm_buf.o exynos_drm_gem.o \
-		exynos_drm_core.o exynos_drm_plane.o
+		exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
+		exynos_drm_plane.o
 
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU)	+= exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
deleted file mode 100644
@@ -1,170 +0,0 @@
-/* exynos_drm_buf.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
-#include "exynos_drm_iommu.h"
-
-static int lowlevel_buffer_allocate(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
-	int ret = 0;
-	enum dma_attr attr;
-	unsigned int nr_pages;
-
-	if (buf->dma_addr) {
-		DRM_DEBUG_KMS("already allocated.\n");
-		return 0;
-	}
-
-	init_dma_attrs(&buf->dma_attrs);
-
-	/*
-	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
-	 * region will be allocated else physically contiguous
-	 * as possible.
-	 */
-	if (!(flags & EXYNOS_BO_NONCONTIG))
-		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
-
-	/*
-	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
-	 * else cachable mapping.
-	 */
-	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
-		attr = DMA_ATTR_WRITE_COMBINE;
-	else
-		attr = DMA_ATTR_NON_CONSISTENT;
-
-	dma_set_attr(attr, &buf->dma_attrs);
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
-
-	nr_pages = buf->size >> PAGE_SHIFT;
-
-	if (!is_drm_iommu_supported(dev)) {
-		dma_addr_t start_addr;
-		unsigned int i = 0;
-
-		buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
-		if (!buf->pages) {
-			DRM_ERROR("failed to allocate pages.\n");
-			return -ENOMEM;
-		}
-
-		buf->cookie = dma_alloc_attrs(dev->dev,
-					buf->size,
-					&buf->dma_addr, GFP_KERNEL,
-					&buf->dma_attrs);
-		if (!buf->cookie) {
-			DRM_ERROR("failed to allocate buffer.\n");
-			ret = -ENOMEM;
-			goto err_free;
-		}
-
-		start_addr = buf->dma_addr;
-		while (i < nr_pages) {
-			buf->pages[i] = phys_to_page(start_addr);
-			start_addr += PAGE_SIZE;
-			i++;
-		}
-	} else {
-
-		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
-					&buf->dma_addr, GFP_KERNEL,
-					&buf->dma_attrs);
-		if (!buf->pages) {
-			DRM_ERROR("failed to allocate buffer.\n");
-			return -ENOMEM;
-		}
-	}
-
-	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->dma_addr,
-			buf->size);
-
-	return ret;
-
-err_free:
-	if (!is_drm_iommu_supported(dev))
-		drm_free_large(buf->pages);
-
-	return ret;
-}
-
-static void lowlevel_buffer_deallocate(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
-	if (!buf->dma_addr) {
-		DRM_DEBUG_KMS("dma_addr is invalid.\n");
-		return;
-	}
-
-	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->dma_addr,
-			buf->size);
-
-	if (!is_drm_iommu_supported(dev)) {
-		dma_free_attrs(dev->dev, buf->size, buf->cookie,
-				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-		drm_free_large(buf->pages);
-	} else
-		dma_free_attrs(dev->dev, buf->size, buf->pages,
-				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-
-	buf->dma_addr = (dma_addr_t)NULL;
-}
-
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
-		unsigned int size)
-{
-	struct exynos_drm_gem_buf *buffer;
-
-	DRM_DEBUG_KMS("desired size = 0x%x\n", size);
-
-	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-	if (!buffer)
-		return NULL;
-
-	buffer->size = size;
-	return buffer;
-}
-
-void exynos_drm_fini_buf(struct drm_device *dev,
-		struct exynos_drm_gem_buf *buffer)
-{
-	kfree(buffer);
-	buffer = NULL;
-}
-
-int exynos_drm_alloc_buf(struct drm_device *dev,
-		struct exynos_drm_gem_buf *buf, unsigned int flags)
-{
-
-	/*
-	 * allocate memory region and set the memory information
-	 * to vaddr and dma_addr of a buffer object.
-	 */
-	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void exynos_drm_free_buf(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buffer)
-{
-
-	lowlevel_buffer_deallocate(dev, flags, buffer);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
deleted file mode 100644
@@ -1,33 +0,0 @@
-/* exynos_drm_buf.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_BUF_H_
-#define _EXYNOS_DRM_BUF_H_
-
-/* create and initialize buffer object. */
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
-		unsigned int size);
-
-/* destroy buffer object. */
-void exynos_drm_fini_buf(struct drm_device *dev,
-		struct exynos_drm_gem_buf *buffer);
-
-/* allocate physical memory region and setup sgt. */
-int exynos_drm_alloc_buf(struct drm_device *dev,
-		struct exynos_drm_gem_buf *buf,
-		unsigned int flags);
-
-/* release physical memory region, and sgt. */
-void exynos_drm_free_buf(struct drm_device *dev,
-		unsigned int flags,
-		struct exynos_drm_gem_buf *buffer);
-
-#endif
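
To see what the removal buys at the call sites, compare the old and new
allocation sequences in exynos_drm_gem_create(). A sketch only: the
wrapper names create_old()/create_new() are hypothetical, error handling
is omitted, and the two fragments belong to the before and after trees
respectively, so they will not compile side by side:

	/* Before: two objects, four helpers from exynos_drm_buf.c. */
	static struct exynos_drm_gem_obj *create_old(struct drm_device *dev,
						     unsigned int flags,
						     unsigned long size)
	{
		struct exynos_drm_gem_buf *buf;
		struct exynos_drm_gem_obj *obj;

		buf = exynos_drm_init_buf(dev, size);	/* kzalloc()s the buf */
		obj = exynos_drm_gem_init(dev, size);
		obj->buffer = buf;
		exynos_drm_alloc_buf(dev, buf, flags);	/* dma_alloc_attrs() */
		return obj;	/* teardown needs free_buf() + fini_buf() */
	}

	/* After: one object, one static helper inside exynos_drm_gem.c. */
	static struct exynos_drm_gem_obj *create_new(struct drm_device *dev,
						     unsigned int flags,
						     unsigned long size)
	{
		struct exynos_drm_gem_obj *obj;

		obj = exynos_drm_gem_init(dev, size);
		obj->flags = flags;
		exynos_drm_alloc_buf(obj);	/* allocates into obj itself */
		return obj;	/* teardown is just free_buf(obj) */
	}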
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -238,22 +238,22 @@ err_free:
 	return ERR_PTR(ret);
 }
 
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
-						int index)
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
+						 int index)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-	struct exynos_drm_gem_buf *buffer;
+	struct exynos_drm_gem_obj *obj;
 
 	if (index >= MAX_FB_BUFFER)
 		return NULL;
 
-	buffer = exynos_fb->exynos_gem_obj[index]->buffer;
-	if (!buffer)
+	obj = exynos_fb->exynos_gem_obj[index];
+	if (!obj)
 		return NULL;
 
-	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
+	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)obj->dma_addr);
 
-	return buffer;
+	return obj;
 }
 
 static void exynos_drm_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -19,8 +19,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_mode_fb_cmd2 *mode_cmd,
 			    struct drm_gem_object *obj);
 
-/* get memory information of a drm framebuffer */
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+/* get gem object of a drm framebuffer */
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
 						 int index);
 
 void exynos_drm_mode_config_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -40,8 +40,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
 {
 	struct drm_fb_helper *helper = info->par;
 	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
-	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
-	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+	struct exynos_drm_gem_obj *obj = exynos_fbd->exynos_gem_obj;
 	unsigned long vm_size;
 	int ret;
 
@@ -49,11 +48,11 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
 
 	vm_size = vma->vm_end - vma->vm_start;
 
-	if (vm_size > buffer->size)
+	if (vm_size > obj->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
-			buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+	ret = dma_mmap_attrs(helper->dev->dev, vma, obj->pages, obj->dma_addr,
+			obj->size, &obj->dma_attrs);
 	if (ret < 0) {
 		DRM_ERROR("failed to mmap.\n");
 		return ret;
@@ -80,7 +79,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 				   struct drm_framebuffer *fb)
 {
 	struct fb_info *fbi = helper->fbdev;
-	struct exynos_drm_gem_buf *buffer;
+	struct exynos_drm_gem_obj *obj;
 	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
 	unsigned int nr_pages;
 	unsigned long offset;
@@ -89,18 +88,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
 	/* RGB formats use only one buffer */
-	buffer = exynos_drm_fb_buffer(fb, 0);
-	if (!buffer) {
-		DRM_DEBUG_KMS("buffer is null.\n");
+	obj = exynos_drm_fb_gem_obj(fb, 0);
+	if (!obj) {
+		DRM_DEBUG_KMS("gem object is null.\n");
 		return -EFAULT;
 	}
 
-	nr_pages = buffer->size >> PAGE_SHIFT;
+	nr_pages = obj->size >> PAGE_SHIFT;
 
-	buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
-			nr_pages, VM_MAP,
+	obj->kvaddr = (void __iomem *) vmap(obj->pages, nr_pages, VM_MAP,
 			pgprot_writecombine(PAGE_KERNEL));
-	if (!buffer->kvaddr) {
+	if (!obj->kvaddr) {
 		DRM_ERROR("failed to map pages to kernel space.\n");
 		return -EIO;
 	}
@@ -111,7 +109,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
 	offset += fbi->var.yoffset * fb->pitches[0];
 
-	fbi->screen_base = buffer->kvaddr + offset;
+	fbi->screen_base = obj->kvaddr + offset;
 	fbi->screen_size = size;
 	fbi->fix.smem_len = size;
 
@@ -290,8 +288,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
 	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
 	struct drm_framebuffer *fb;
 
-	if (exynos_gem_obj->buffer->kvaddr)
-		vunmap(exynos_gem_obj->buffer->kvaddr);
+	if (exynos_gem_obj->kvaddr)
+		vunmap(exynos_gem_obj->kvaddr);
 
 	/* release drm framebuffer and real buffer */
 	if (fb_helper->fb && fb_helper->fb->funcs) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -18,9 +18,109 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 #include "exynos_drm_iommu.h"
 
+static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	enum dma_attr attr;
+	unsigned int nr_pages;
+
+	if (obj->dma_addr) {
+		DRM_DEBUG_KMS("already allocated.\n");
+		return 0;
+	}
+
+	init_dma_attrs(&obj->dma_attrs);
+
+	/*
+	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
+	 * region will be allocated else physically contiguous
+	 * as possible.
+	 */
+	if (!(obj->flags & EXYNOS_BO_NONCONTIG))
+		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);
+
+	/*
+	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
+	 * else cachable mapping.
+	 */
+	if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
+		attr = DMA_ATTR_WRITE_COMBINE;
+	else
+		attr = DMA_ATTR_NON_CONSISTENT;
+
+	dma_set_attr(attr, &obj->dma_attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);
+
+	nr_pages = obj->size >> PAGE_SHIFT;
+
+	if (!is_drm_iommu_supported(dev)) {
+		dma_addr_t start_addr;
+		unsigned int i = 0;
+
+		obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+		if (!obj->pages) {
+			DRM_ERROR("failed to allocate pages.\n");
+			return -ENOMEM;
+		}
+
+		obj->cookie = dma_alloc_attrs(dev->dev,
+					obj->size,
+					&obj->dma_addr, GFP_KERNEL,
+					&obj->dma_attrs);
+		if (!obj->cookie) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			drm_free_large(obj->pages);
+			return -ENOMEM;
+		}
+
+		start_addr = obj->dma_addr;
+		while (i < nr_pages) {
+			obj->pages[i] = phys_to_page(start_addr);
+			start_addr += PAGE_SIZE;
+			i++;
+		}
+	} else {
+		obj->pages = dma_alloc_attrs(dev->dev, obj->size,
+					&obj->dma_addr, GFP_KERNEL,
+					&obj->dma_attrs);
+		if (!obj->pages) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			return -ENOMEM;
+		}
+	}
+
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)obj->dma_addr,
+			obj->size);
+
+	return 0;
+}
+
+static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+
+	if (!obj->dma_addr) {
+		DRM_DEBUG_KMS("dma_addr is invalid.\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)obj->dma_addr, obj->size);
+
+	if (!is_drm_iommu_supported(dev)) {
+		dma_free_attrs(dev->dev, obj->size, obj->cookie,
+				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+		drm_free_large(obj->pages);
+	} else
+		dma_free_attrs(dev->dev, obj->size, obj->pages,
+				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+
+	obj->dma_addr = (dma_addr_t)NULL;
+}
+
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
 					struct drm_file *file_priv,
 					unsigned int *handle)
@@ -45,11 +145,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
 
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 {
-	struct drm_gem_object *obj;
-	struct exynos_drm_gem_buf *buf;
-
-	obj = &exynos_gem_obj->base;
-	buf = exynos_gem_obj->buffer;
+	struct drm_gem_object *obj = &exynos_gem_obj->base;
 
 	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
 
@@ -62,12 +158,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 	if (obj->import_attach)
 		goto out;
 
-	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+	exynos_drm_free_buf(exynos_gem_obj);
 
 out:
-	exynos_drm_fini_buf(obj->dev, buf);
-	exynos_gem_obj->buffer = NULL;
-
 	drm_gem_free_mmap_offset(obj);
 
 	/* release file pointer to gem object. */
@@ -94,7 +187,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
 
 	drm_gem_object_unreference_unlocked(obj);
 
-	return exynos_gem_obj->buffer->size;
+	return exynos_gem_obj->size;
 }
 
 
@@ -129,7 +222,6 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 						unsigned long size)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
 	int ret;
 
 	if (flags & ~(EXYNOS_BO_MASK)) {
@@ -144,33 +236,21 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 
 	size = roundup(size, PAGE_SIZE);
 
-	buf = exynos_drm_init_buf(dev, size);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
-
 	exynos_gem_obj = exynos_drm_gem_init(dev, size);
-	if (IS_ERR(exynos_gem_obj)) {
-		ret = PTR_ERR(exynos_gem_obj);
-		goto err_fini_buf;
-	}
-
-	exynos_gem_obj->buffer = buf;
+	if (IS_ERR(exynos_gem_obj))
+		return exynos_gem_obj;
 
 	/* set memory type and cache attribute from user side. */
 	exynos_gem_obj->flags = flags;
 
-	ret = exynos_drm_alloc_buf(dev, buf, flags);
-	if (ret < 0)
-		goto err_gem_fini;
+	ret = exynos_drm_alloc_buf(exynos_gem_obj);
+	if (ret < 0) {
+		drm_gem_object_release(&exynos_gem_obj->base);
+		kfree(exynos_gem_obj);
+		return ERR_PTR(ret);
+	}
 
 	return exynos_gem_obj;
-
-err_gem_fini:
-	drm_gem_object_release(&exynos_gem_obj->base);
-	kfree(exynos_gem_obj);
-err_fini_buf:
-	exynos_drm_fini_buf(dev, buf);
-	return ERR_PTR(ret);
 }
 
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -209,7 +289,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	return &exynos_gem_obj->buffer->dma_addr;
+	return &exynos_gem_obj->dma_addr;
 }
 
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
@@ -237,7 +317,6 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 			       struct vm_area_struct *vma)
 {
 	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
-	struct exynos_drm_gem_buf *buffer;
 	unsigned long vm_size;
 	int ret;
 
@@ -246,19 +325,13 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 
 	vm_size = vma->vm_end - vma->vm_start;
 
-	/*
-	 * a buffer contains information to physically continuous memory
-	 * allocated by user request or at framebuffer creation.
-	 */
-	buffer = exynos_gem_obj->buffer;
-
 	/* check if user-requested size is valid. */
-	if (vm_size > buffer->size)
+	if (vm_size > exynos_gem_obj->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
-			buffer->dma_addr, buffer->size,
-			&buffer->dma_attrs);
+	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
+			exynos_gem_obj->dma_addr, exynos_gem_obj->size,
+			&exynos_gem_obj->dma_attrs);
 	if (ret < 0) {
 		DRM_ERROR("failed to mmap.\n");
 		return ret;
@@ -418,12 +491,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
 
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
-
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-	buf = exynos_gem_obj->buffer;
-
 	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
 }
 
@@ -508,7 +575,6 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
 	unsigned long pfn;
 	pgoff_t page_offset;
 	int ret;
@@ -516,13 +582,13 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	page_offset = ((unsigned long)vmf->virtual_address -
 			vma->vm_start) >> PAGE_SHIFT;
 
-	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+	if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
 		DRM_ERROR("invalid page offset\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
-	pfn = page_to_pfn(buf->pages[page_offset]);
+	pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
 	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
 
 out:
@@ -583,12 +649,11 @@ err_close_vm:
 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
 	int npages;
 
-	npages = buf->size >> PAGE_SHIFT;
+	npages = exynos_gem_obj->size >> PAGE_SHIFT;
 
-	return drm_prime_pages_to_sg(buf->pages, npages);
+	return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
 }
 
 struct drm_gem_object *
@@ -597,34 +662,29 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
 				     struct sg_table *sgt)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
 	int npages;
 	int ret;
 
-	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
-
-	buf->size = attach->dmabuf->size;
-	buf->dma_addr = sg_dma_address(sgt->sgl);
-
-	npages = buf->size >> PAGE_SHIFT;
-	buf->pages = drm_malloc_ab(npages, sizeof(struct page *));
-	if (!buf->pages) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, buf->pages, NULL, npages);
-	if (ret < 0)
-		goto err_free_large;
-
-	exynos_gem_obj = exynos_drm_gem_init(dev, buf->size);
+	exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
 	if (IS_ERR(exynos_gem_obj)) {
 		ret = PTR_ERR(exynos_gem_obj);
 		goto err;
 	}
 
+	exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);
+
+	npages = exynos_gem_obj->size >> PAGE_SHIFT;
+	exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!exynos_gem_obj->pages) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
+					       npages);
+	if (ret < 0)
+		goto err_free_large;
+
 	if (sgt->nents == 1) {
 		/* always physically continuous memory if sgt->nents is 1. */
 		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
@@ -641,9 +701,10 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
 	return &exynos_gem_obj->base;
 
 err_free_large:
-	drm_free_large(buf->pages);
+	drm_free_large(exynos_gem_obj->pages);
 err:
-	kfree(buf);
+	drm_gem_object_release(&exynos_gem_obj->base);
+	kfree(exynos_gem_obj);
 	return ERR_PTR(ret);
 }
 
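
As an aside, the allocation policy that moved into the new static
exynos_drm_alloc_buf() above is a single mapping from EXYNOS_BO_* flags
to DMA attributes. A condensed sketch for reading purposes only (the
helper name is hypothetical and not in the commit; it uses the same
pre-4.8 struct dma_attrs API that the code above relies on):

	static void exynos_gem_flags_to_dma_attrs(unsigned int flags,
						  struct dma_attrs *attrs)
	{
		init_dma_attrs(attrs);

		/* EXYNOS_BO_CONTIG (no NONCONTIG bit) demands one contiguous chunk. */
		if (!(flags & EXYNOS_BO_NONCONTIG))
			dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs);

		/* Write-combine unless the buffer is explicitly cachable. */
		if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
			dma_set_attr(DMA_ATTR_WRITE_COMBINE, attrs);
		else
			dma_set_attr(DMA_ATTR_NON_CONSISTENT, attrs);

		/* No kernel mapping up front; fbdev vmap()s on demand. */
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
	}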
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -19,26 +19,6 @@
 
 #define IS_NONCONTIG_BUFFER(f)		(f & EXYNOS_BO_NONCONTIG)
 
-/*
- * exynos drm gem buffer structure.
- *
- * @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
- * @dma_addr: bus address(accessed by dma) to allocated memory region.
- *	- this address could be physical address without IOMMU and
- *	device address with IOMMU.
- * @pages: Array of backing pages.
- * @size: size of allocated memory region.
- */
-struct exynos_drm_gem_buf {
-	void		*cookie;
-	void __iomem	*kvaddr;
-	dma_addr_t	dma_addr;
-	struct dma_attrs	dma_attrs;
-	struct page	**pages;
-	unsigned long	size;
-};
-
 /*
  * exynos drm buffer structure.
  *
@@ -50,18 +30,28 @@ struct exynos_drm_gem_buf {
  *	by user request or at framebuffer creation.
  *	continuous memory region allocated by user request
  *	or at framebuffer creation.
- * @flags: indicate memory type to allocated buffer and cache attruibute.
 * @size: size requested from user, in bytes and this size is aligned
 *	in page unit.
+ * @flags: indicate memory type to allocated buffer and cache attruibute.
+ * @cookie: cookie returned by dma_alloc_attrs
+ * @kvaddr: kernel virtual address to allocated memory region.
+ * @dma_addr: bus address(accessed by dma) to allocated memory region.
+ *	- this address could be physical address without IOMMU and
+ *	device address with IOMMU.
+ * @pages: Array of backing pages.
 *
 * P.S. this object would be transferred to user as kms_bo.handle so
 *	user can access the buffer through kms_bo.handle.
 */
 struct exynos_drm_gem_obj {
-	struct drm_gem_object	base;
-	struct exynos_drm_gem_buf	*buffer;
-	unsigned long		size;
-	unsigned int		flags;
+	struct drm_gem_object	base;
+	unsigned int		flags;
+	unsigned long		size;
+	void			*cookie;
+	void __iomem		*kvaddr;
+	dma_addr_t		dma_addr;
+	struct dma_attrs	dma_attrs;
+	struct page		**pages;
 };
 
 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -134,15 +134,15 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
 
 	nr = exynos_drm_fb_get_buf_cnt(state->fb);
 	for (i = 0; i < nr; i++) {
-		struct exynos_drm_gem_buf *buffer =
-					exynos_drm_fb_buffer(state->fb, i);
+		struct exynos_drm_gem_obj *obj =
+					exynos_drm_fb_gem_obj(state->fb, i);
 
-		if (!buffer) {
-			DRM_DEBUG_KMS("buffer is null\n");
+		if (!obj) {
+			DRM_DEBUG_KMS("gem object is null\n");
 			return -EFAULT;
 		}
 
-		exynos_plane->dma_addr[i] = buffer->dma_addr +
+		exynos_plane->dma_addr[i] = obj->dma_addr +
 					state->fb->offsets[i];
 
 		DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",