Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next

Highlights this time:
1. Fix for a nasty Kconfig dependency chain issue from Philipp.
2. Occlusion query buffer address added to the cmdstream validator by
Christian.
3. Fixes and cleanups to the job handling from me, centered on a
refcounted submit object (see the sketch after this list). This allows
us to turn on the GPU performance profiling added in the last cycle.
It is also prep work for hooking in the DRM GPU scheduler, which I hope
to land in the next cycle.
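
A minimal sketch of the refcounted-submit pattern at the heart of item 3,
using the names from the diff below; the real structure also carries the
cmdbuf, the in/out fences, the perfmon requests and the BO list, all
elided here:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct etnaviv_gem_submit {
	struct kref refcount;
	/* cmdbuf, in/out fences, PMRs and BOs live here too */
};

static void submit_cleanup(struct kref *kref)
{
	struct etnaviv_gem_submit *submit =
		container_of(kref, struct etnaviv_gem_submit, refcount);

	/* all teardown is centralized here: unpin BOs, drop fences, etc. */
	kfree(submit);
}

void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
{
	kref_put(&submit->refcount, submit_cleanup);
}

Each consumer (the perfmon sync points, the GPU event, the retire worker)
takes its own kref_get() and drops it with etnaviv_submit_put() when done,
so the submit can safely outlive the ioctl that created it.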

* 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux: (32 commits)
  drm/etnaviv: use memset32 to init pagetable
  drm/etnaviv: move submit free out of critical section
  drm/etnaviv: re-enable perfmon support
  drm/etnaviv: couple runtime PM management to submit object lifetime
  drm/etnaviv: move GPU active handling to bo pin/unpin
  drm/etnaviv: move cmdbuf into submit object
  drm/etnaviv: use submit exec_state for perfmon sampling
  drm/etnaviv: move exec_state to submit object
  drm/etnaviv: move PMRs to submit object
  drm/etnaviv: refcount the submit object
  drm/etnaviv: move ww_acquire_ctx out of submit object
  drm/etnaviv: move object unpinning to submit cleanup
  drm/etnaviv: attach in fence to submit and move fence wait to fence_sync
  drm/etnaviv: rename submit fence to out_fence
  drm/etnaviv: move object fence attachment to gem_submit path
  drm/etnaviv: simplify submit_create
  drm/etnaviv: add lockdep annotations to buffer manipulation functions
  drm/etnaviv: hold GPU lock while inserting END command
  drm/etnaviv: move workqueue to be per GPU
  drm/etnaviv: remove switch_context member from etnaviv_gpu
  ...
commit 066f9eb47d
Merged by: Dave Airlie
Date: 2018-01-05 09:42:04 +10:00
19 changed files with 324 additions and 512 deletions

diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig

@@ -6,6 +6,7 @@ config DRM_ETNAVIV
depends on MMU
select SHMEM
select SYNC_FILE
select THERMAL if DRM_ETNAVIV_THERMAL
select TMPFS
select WANT_DEV_COREDUMP
select CMA if HAVE_DMA_CONTIGUOUS
@@ -13,6 +14,14 @@ config DRM_ETNAVIV
help
DRM driver for Vivante GPUs.
config DRM_ETNAVIV_THERMAL
bool "enable ETNAVIV thermal throttling"
depends on DRM_ETNAVIV
default y
help
Compile in support for thermal throttling.
Say Y unless you want to risk burning your SoC.
config DRM_ETNAVIV_REGISTER_LOGGING
bool "enable ETNAVIV register logging"
depends on DRM_ETNAVIV
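
The Kconfig change is only half of the dependency fix; the C side later
in this series (etnaviv_gpu.c) wraps the thermal calls in
IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL). A minimal sketch of that guard,
with a hypothetical wrapper name, not driver code:

#include <linux/device.h>
#include <linux/kconfig.h>
#include <linux/thermal.h>

static struct thermal_cooling_device *
example_register_cooling(struct device *dev, void *gpu,
			 const struct thermal_cooling_device_ops *ops)
{
	/*
	 * IS_ENABLED() evaluates to a compile-time constant, so with the
	 * option disabled the call below is eliminated as dead code and
	 * no THERMAL symbols are referenced at link time.
	 */
	if (!IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		return NULL;

	return thermal_of_cooling_device_register(dev->of_node,
						  (char *)dev_name(dev),
						  gpu, ops);
}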

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c

@@ -100,6 +100,8 @@ static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
{
u32 flush = 0;
lockdep_assert_held(&gpu->lock);
/*
* This assumes that if we're switching to 2D, we're switching
* away from 3D, and vice versa. Hence, if we're switching to
@@ -164,7 +166,9 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
struct etnaviv_cmdbuf *buffer = gpu->buffer;
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
lockdep_assert_held(&gpu->lock);
/* initialize buffer */
buffer->user_size = 0;
@@ -178,7 +182,9 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
struct etnaviv_cmdbuf *buffer = gpu->buffer;
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
lockdep_assert_held(&gpu->lock);
buffer->user_size = 0;
@@ -211,10 +217,12 @@ u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
struct etnaviv_cmdbuf *buffer = gpu->buffer;
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 link_target, flush = 0;
lockdep_assert_held(&gpu->lock);
if (gpu->exec_state == ETNA_PIPE_2D)
flush = VIVS_GL_FLUSH_CACHE_PE2D;
else if (gpu->exec_state == ETNA_PIPE_3D)
@@ -253,10 +261,12 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
struct etnaviv_cmdbuf *buffer = gpu->buffer;
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 dwords, target;
lockdep_assert_held(&gpu->lock);
/*
* We need at most 3 dwords in the return target:
* 1 event + 1 end + 1 wait + 1 link.
@@ -287,13 +297,16 @@ void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
}
/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf)
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
unsigned int event, struct etnaviv_cmdbuf *cmdbuf)
{
struct etnaviv_cmdbuf *buffer = gpu->buffer;
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 return_target, return_dwords;
u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state;
lockdep_assert_held(&gpu->lock);
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
@@ -306,7 +319,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
* need to append a mmu flush load state, followed by a new
* link to this buffer - a total of four additional words.
*/
if (gpu->mmu->need_flush || gpu->switch_context) {
if (gpu->mmu->need_flush || switch_context) {
u32 target, extra_dwords;
/* link command */
@@ -321,7 +334,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
}
/* pipe switch commands */
if (gpu->switch_context)
if (switch_context)
extra_dwords += 4;
target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
@@ -349,10 +362,9 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
gpu->mmu->need_flush = false;
}
if (gpu->switch_context) {
etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
gpu->exec_state = cmdbuf->exec_state;
gpu->switch_context = false;
if (switch_context) {
etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
gpu->exec_state = exec_state;
}
/* And the link to the submitted buffer */
@@ -421,4 +433,6 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
gpu->lastctx = cmdbuf->ctx;
}
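
The lockdep_assert_held() annotations added throughout this file document
the locking contract of the buffer manipulation functions and give a
cheap runtime check when lockdep is enabled. The pattern in isolation,
with stand-in names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct example_gpu {
	struct mutex lock;
	unsigned int user_size;
};

static void example_buffer_op(struct example_gpu *gpu)
{
	/* compiles away without CONFIG_LOCKDEP; warns if the lock is
	 * not held by the current context */
	lockdep_assert_held(&gpu->lock);

	gpu->user_size = 0;
}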

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c

@@ -78,6 +78,7 @@ static const struct {
ST(0x17c0, 8),
ST(0x17e0, 8),
ST(0x2400, 14 * 16),
ST(0x3824, 1),
ST(0x10800, 32 * 16),
ST(0x14600, 16),
ST(0x14800, 8 * 8),

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c

@@ -86,26 +86,11 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
kfree(suballoc);
}
struct etnaviv_cmdbuf *
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
size_t nr_bos, size_t nr_pmrs)
int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
struct etnaviv_cmdbuf *cmdbuf, u32 size)
{
struct etnaviv_cmdbuf *cmdbuf;
struct etnaviv_perfmon_request *pmrs;
size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
sizeof(*cmdbuf));
int granule_offs, order, ret;
cmdbuf = kzalloc(sz, GFP_KERNEL);
if (!cmdbuf)
return NULL;
sz = sizeof(*pmrs) * nr_pmrs;
pmrs = kzalloc(sz, GFP_KERNEL);
if (!pmrs)
goto out_free_cmdbuf;
cmdbuf->pmrs = pmrs;
cmdbuf->suballoc = suballoc;
cmdbuf->size = size;
@@ -123,7 +108,7 @@ retry:
if (!ret) {
dev_err(suballoc->gpu->dev,
"Timeout waiting for cmdbuf space\n");
return NULL;
return -ETIMEDOUT;
}
goto retry;
}
@@ -131,11 +116,7 @@ retry:
cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
return cmdbuf;
out_free_cmdbuf:
kfree(cmdbuf);
return NULL;
return 0;
}
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
@@ -151,8 +132,6 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
suballoc->free_space = 1;
mutex_unlock(&suballoc->lock);
wake_up_all(&suballoc->free_event);
kfree(cmdbuf->pmrs);
kfree(cmdbuf);
}
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h

@@ -33,27 +33,15 @@ struct etnaviv_cmdbuf {
void *vaddr;
u32 size;
u32 user_size;
/* fence after which this buffer is to be disposed */
struct dma_fence *fence;
/* target exec state */
u32 exec_state;
/* per GPU in-flight list */
struct list_head node;
/* perfmon requests */
unsigned int nr_pmrs;
struct etnaviv_perfmon_request *pmrs;
/* BOs attached to this command buffer */
unsigned int nr_bos;
struct etnaviv_vram_mapping *bo_map[0];
};
struct etnaviv_cmdbuf_suballoc *
etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu);
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
struct etnaviv_cmdbuf *
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
size_t nr_bos, size_t nr_pmrs);
int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
struct etnaviv_cmdbuf *cmdbuf, u32 size);
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
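
With etnaviv_cmdbuf_new() gone, a command buffer is now embedded in its
owning object and initialized in place. A usage sketch, mirroring how
etnaviv_gpu.c further down embeds gpu->buffer; example_ring is a
stand-in, not driver code:

#include "etnaviv_cmdbuf.h"

struct example_ring {
	struct etnaviv_cmdbuf buffer;	/* embedded, no longer a pointer */
};

static int example_ring_create(struct example_ring *ring,
			       struct etnaviv_cmdbuf_suballoc *suballoc)
{
	/* storage follows the owner's lifetime; only suballocator space
	 * is claimed here and released again via etnaviv_cmdbuf_free() */
	return etnaviv_cmdbuf_init(suballoc, &ring->buffer, PAGE_SIZE);
}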

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c

@@ -172,7 +172,7 @@ static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
struct etnaviv_cmdbuf *buf = gpu->buffer;
struct etnaviv_cmdbuf *buf = &gpu->buffer;
u32 size = buf->size;
u32 *ptr = buf->vaddr;
u32 i;
@@ -459,9 +459,6 @@ static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
struct drm_etnaviv_pm_domain *args = data;
struct etnaviv_gpu *gpu;
/* reject as long as the feature isn't stable */
return -EINVAL;
if (args->pipe >= ETNA_MAX_PIPES)
return -EINVAL;
@@ -479,9 +476,6 @@ static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
struct drm_etnaviv_pm_signal *args = data;
struct etnaviv_gpu *gpu;
/* reject as long as the feature isn't stable */
return -EINVAL;
if (args->pipe >= ETNA_MAX_PIPES)
return -EINVAL;
@@ -556,7 +550,7 @@ static struct drm_driver etnaviv_drm_driver = {
.desc = "etnaviv DRM",
.date = "20151214",
.major = 1,
.minor = 1,
.minor = 2,
};
/*
@@ -580,12 +574,6 @@ static int etnaviv_bind(struct device *dev)
}
drm->dev_private = priv;
priv->wq = alloc_ordered_workqueue("etnaviv", 0);
if (!priv->wq) {
ret = -ENOMEM;
goto out_wq;
}
mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
@@ -607,9 +595,6 @@ static int etnaviv_bind(struct device *dev)
out_register:
component_unbind_all(dev, drm);
out_bind:
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
out_wq:
kfree(priv);
out_unref:
drm_dev_unref(drm);
@@ -624,9 +609,6 @@ static void etnaviv_unbind(struct device *dev)
drm_dev_unregister(drm);
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
component_unbind_all(dev, drm);
drm->dev_private = NULL;

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h

@@ -56,18 +56,8 @@ struct etnaviv_drm_private {
/* list of GEM objects: */
struct mutex gem_lock;
struct list_head gem_list;
struct workqueue_struct *wq;
};
static inline void etnaviv_queue_work(struct drm_device *dev,
struct work_struct *w)
{
struct etnaviv_drm_private *priv = dev->dev_private;
queue_work(priv->wq, w);
}
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -97,8 +87,8 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
unsigned int event, struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_validate_init(void);
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
u32 *stream, unsigned int size,

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c

@@ -120,7 +120,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
struct core_dump_iterator iter;
struct etnaviv_vram_mapping *vram;
struct etnaviv_gem_object *obj;
struct etnaviv_cmdbuf *cmd;
struct etnaviv_gem_submit *submit;
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
@@ -132,11 +132,11 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
n_bomap_pages = 0;
file_size = ARRAY_SIZE(etnaviv_dump_registers) *
sizeof(struct etnaviv_dump_registers) +
mmu_size + gpu->buffer->size;
mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
list_for_each_entry(cmd, &gpu->active_cmd_list, node) {
file_size += cmd->size;
list_for_each_entry(submit, &gpu->active_submit_list, node) {
file_size += submit->cmdbuf.size;
n_obj++;
}
@@ -176,13 +176,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
etnaviv_core_dump_registers(&iter, gpu);
etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
gpu->buffer->size,
etnaviv_cmdbuf_get_va(gpu->buffer));
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer));
list_for_each_entry(cmd, &gpu->active_cmd_list, node)
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
cmd->size, etnaviv_cmdbuf_get_va(cmd));
list_for_each_entry(submit, &gpu->active_submit_list, node)
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf));
/* Reserve space for the bomap */
if (n_bomap_pages) {

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c

@@ -24,6 +24,9 @@
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
struct drm_device *dev = etnaviv_obj->base.dev;
@@ -583,7 +586,7 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
kfree(etnaviv_obj);
}
int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
@@ -591,8 +594,6 @@ int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
mutex_lock(&priv->gem_lock);
list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
mutex_unlock(&priv->gem_lock);
return 0;
}
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
@@ -640,8 +641,9 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
return 0;
}
static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
u32 size, u32 flags)
/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
struct drm_gem_object *obj = NULL;
int ret;
@@ -653,6 +655,8 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
if (ret)
goto fail;
lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
ret = drm_gem_object_init(dev, obj, size);
if (ret == 0) {
struct address_space *mapping;
@@ -660,7 +664,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
/*
* Our buffers are kept pinned, so allocating them
* from the MOVABLE zone is a really bad idea, and
* conflicts with CMA. See coments above new_inode()
* conflicts with CMA. See comments above new_inode()
* why this is required _and_ expected if you're
* going to pin these pages.
*/
@@ -672,33 +676,12 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
if (ret)
goto fail;
return obj;
fail:
drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
struct drm_gem_object *obj;
int ret;
obj = __etnaviv_gem_new(dev, size, flags);
if (IS_ERR(obj))
return PTR_ERR(obj);
ret = etnaviv_gem_obj_add(dev, obj);
if (ret < 0) {
drm_gem_object_put_unlocked(obj);
return ret;
}
etnaviv_gem_obj_add(dev, obj);
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
fail:
drm_gem_object_put_unlocked(obj);
return ret;
@@ -722,139 +705,41 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
return 0;
}
struct get_pages_work {
struct work_struct work;
struct mm_struct *mm;
struct task_struct *task;
struct etnaviv_gem_object *etnaviv_obj;
};
static struct page **etnaviv_gem_userptr_do_get_pages(
struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct page **pvec;
uintptr_t ptr;
unsigned int flags = 0;
pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!pvec)
return ERR_PTR(-ENOMEM);
if (!etnaviv_obj->userptr.ro)
flags |= FOLL_WRITE;
pinned = 0;
ptr = etnaviv_obj->userptr.ptr;
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
flags, pvec + pinned, NULL, NULL);
if (ret < 0)
break;
ptr += ret * PAGE_SIZE;
pinned += ret;
}
up_read(&mm->mmap_sem);
if (ret < 0) {
release_pages(pvec, pinned);
kvfree(pvec);
return ERR_PTR(ret);
}
return pvec;
}
static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
struct page **pvec;
pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);
mutex_lock(&etnaviv_obj->lock);
if (IS_ERR(pvec)) {
etnaviv_obj->userptr.work = ERR_CAST(pvec);
} else {
etnaviv_obj->userptr.work = NULL;
etnaviv_obj->pages = pvec;
}
mutex_unlock(&etnaviv_obj->lock);
drm_gem_object_put_unlocked(&etnaviv_obj->base);
mmput(work->mm);
put_task_struct(work->task);
kfree(work);
}
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
struct page **pvec = NULL;
struct get_pages_work *work;
struct mm_struct *mm;
int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
if (etnaviv_obj->userptr.work) {
if (IS_ERR(etnaviv_obj->userptr.work)) {
ret = PTR_ERR(etnaviv_obj->userptr.work);
etnaviv_obj->userptr.work = NULL;
} else {
ret = -EAGAIN;
}
return ret;
}
might_lock_read(&current->mm->mmap_sem);
mm = get_task_mm(etnaviv_obj->userptr.task);
pinned = 0;
if (mm == current->mm) {
pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!pvec) {
mmput(mm);
return -ENOMEM;
}
if (userptr->mm != current->mm)
return -EPERM;
pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
!etnaviv_obj->userptr.ro, pvec);
if (pinned < 0) {
kvfree(pvec);
mmput(mm);
return pinned;
}
if (pinned == npages) {
etnaviv_obj->pages = pvec;
mmput(mm);
return 0;
}
}
release_pages(pvec, pinned);
kvfree(pvec);
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (!work) {
mmput(mm);
pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!pvec)
return -ENOMEM;
}
get_task_struct(current);
drm_gem_object_get(&etnaviv_obj->base);
do {
unsigned num_pages = npages - pinned;
uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
struct page **pages = pvec + pinned;
work->mm = mm;
work->task = current;
work->etnaviv_obj = etnaviv_obj;
ret = get_user_pages_fast(ptr, num_pages,
!userptr->ro ? FOLL_WRITE : 0, pages);
if (ret < 0) {
release_pages(pvec, pinned);
kvfree(pvec);
return ret;
}
etnaviv_obj->userptr.work = &work->work;
INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);
pinned += ret;
etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);
} while (pinned < npages);
return -EAGAIN;
etnaviv_obj->pages = pvec;
return 0;
}
static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
@@ -870,7 +755,6 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
release_pages(etnaviv_obj->pages, npages);
kvfree(etnaviv_obj->pages);
}
put_task_struct(etnaviv_obj->userptr.task);
}
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
@@ -897,17 +781,16 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
if (ret)
return ret;
etnaviv_obj->userptr.ptr = ptr;
etnaviv_obj->userptr.task = current;
etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
get_task_struct(current);
lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);
ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
if (ret)
goto unreference;
etnaviv_obj->userptr.ptr = ptr;
etnaviv_obj->userptr.mm = current->mm;
etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
/* drop reference from allocate - handle holds it now */
drm_gem_object_put_unlocked(&etnaviv_obj->base);
return ret;

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h

@@ -18,6 +18,7 @@
#define __ETNAVIV_GEM_H__
#include <linux/reservation.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
struct dma_fence;
@@ -26,8 +27,7 @@ struct etnaviv_gem_object;
struct etnaviv_gem_userptr {
uintptr_t ptr;
struct task_struct *task;
struct work_struct *work;
struct mm_struct *mm;
bool ro;
};
@@ -98,26 +98,32 @@ struct etnaviv_gem_submit_bo {
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
* make it easier to unwind when things go wrong, etc). This only
* lasts for the duration of the submit-ioctl.
* make it easier to unwind when things go wrong, etc).
*/
struct etnaviv_gem_submit {
struct drm_device *dev;
struct kref refcount;
struct etnaviv_gpu *gpu;
struct ww_acquire_ctx ticket;
struct dma_fence *fence;
struct dma_fence *out_fence, *in_fence;
struct list_head node; /* GPU active submit list */
struct etnaviv_cmdbuf cmdbuf;
bool runtime_resumed;
u32 exec_state;
u32 flags;
unsigned int nr_pmrs;
struct etnaviv_perfmon_request *pmrs;
unsigned int nr_bos;
struct etnaviv_gem_submit_bo bos[0];
/* No new members here, the previous one is variable-length! */
};
void etnaviv_submit_put(struct etnaviv_gem_submit * submit);
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
struct timespec *timeout);
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
struct etnaviv_gem_object **res);
int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
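
The "No new members here" warning exists because bos[] is a flexible
array member: the submit is allocated in one chunk sized for nr_bos
entries, so any member placed after bos[] would overlap them. The sizing
helper is essentially the following (quoted from memory of
etnaviv_drv.h, so treat it as illustrative):

static inline size_t size_vstruct(size_t nelem, size_t elem_size,
				  size_t base)
{
	return base + nelem * elem_size;
}

/*
 * used as in submit_create() below:
 *   sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
 *   submit = kzalloc(sz, GFP_KERNEL);
 */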

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c

@@ -19,6 +19,7 @@
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
static struct lock_class_key etnaviv_prime_lock_class;
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
@@ -125,6 +126,8 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
if (ret < 0)
return ERR_PTR(ret);
lockdep_set_class(&etnaviv_obj->lock, &etnaviv_prime_lock_class);
npages = size / PAGE_SIZE;
etnaviv_obj->sgt = sgt;
@@ -139,9 +142,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
goto fail;
ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
if (ret)
goto fail;
etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
return &etnaviv_obj->base;

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c

@@ -33,22 +33,25 @@
#define BO_PINNED 0x2000
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
struct etnaviv_gpu *gpu, size_t nr)
struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
{
struct etnaviv_gem_submit *submit;
size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));
size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (submit) {
submit->dev = dev;
submit->gpu = gpu;
submit = kzalloc(sz, GFP_KERNEL);
if (!submit)
return NULL;
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
submit->fence = NULL;
ww_acquire_init(&submit->ticket, &reservation_ww_class);
submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
GFP_KERNEL);
if (!submit->pmrs) {
kfree(submit);
return NULL;
}
submit->nr_pmrs = nr_pmrs;
submit->gpu = gpu;
kref_init(&submit->refcount);
return submit;
}
@@ -111,7 +114,8 @@ static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
}
}
static int submit_lock_objects(struct etnaviv_gem_submit *submit)
static int submit_lock_objects(struct etnaviv_gem_submit *submit,
struct ww_acquire_ctx *ticket)
{
int contended, slow_locked = -1, i, ret = 0;
@@ -126,7 +130,7 @@ retry:
if (!(submit->bos[i].flags & BO_LOCKED)) {
ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
&submit->ticket);
ticket);
if (ret == -EALREADY)
DRM_ERROR("BO at index %u already on submit list\n",
i);
@@ -136,7 +140,7 @@ retry:
}
}
ww_acquire_done(&submit->ticket);
ww_acquire_done(ticket);
return 0;
@@ -154,7 +158,7 @@ fail:
/* we lost out in a seqno race, lock and retry.. */
ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
&submit->ticket);
ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
slow_locked = contended;
@@ -181,19 +185,33 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
break;
}
if (submit->flags & ETNA_SUBMIT_FENCE_FD_IN) {
/*
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
if (!dma_fence_match_context(submit->in_fence, context))
ret = dma_fence_wait(submit->in_fence, true);
}
return ret;
}
static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
{
int i;
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & BO_PINNED)
etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
submit->bos[i].mapping = NULL;
submit->bos[i].flags &= ~BO_PINNED;
if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
reservation_object_add_excl_fence(etnaviv_obj->resv,
submit->out_fence);
else
reservation_object_add_shared_fence(etnaviv_obj->resv,
submit->out_fence);
submit_unlock_object(submit, i);
}
}
@@ -211,6 +229,7 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
ret = PTR_ERR(mapping);
break;
}
atomic_inc(&etnaviv_obj->gpu_active);
submit->bos[i].flags |= BO_PINNED;
submit->bos[i].mapping = mapping;
@@ -285,13 +304,11 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
}
static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
struct etnaviv_cmdbuf *cmdbuf,
const struct drm_etnaviv_gem_submit_pmr *pmrs,
u32 nr_pms)
u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
{
u32 i;
for (i = 0; i < nr_pms; i++) {
for (i = 0; i < submit->nr_pmrs; i++) {
const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
struct etnaviv_gem_submit_bo *bo;
int ret;
@@ -316,39 +333,65 @@ static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
return -EINVAL;
}
if (etnaviv_pm_req_validate(r, cmdbuf->exec_state)) {
if (etnaviv_pm_req_validate(r, exec_state)) {
DRM_ERROR("perfmon request: domain or signal not valid");
return -EINVAL;
}
cmdbuf->pmrs[i].flags = r->flags;
cmdbuf->pmrs[i].domain = r->domain;
cmdbuf->pmrs[i].signal = r->signal;
cmdbuf->pmrs[i].sequence = r->sequence;
cmdbuf->pmrs[i].offset = r->read_offset;
cmdbuf->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
submit->pmrs[i].flags = r->flags;
submit->pmrs[i].domain = r->domain;
submit->pmrs[i].signal = r->signal;
submit->pmrs[i].sequence = r->sequence;
submit->pmrs[i].offset = r->read_offset;
submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
}
return 0;
}
static void submit_cleanup(struct etnaviv_gem_submit *submit)
static void submit_cleanup(struct kref *kref)
{
struct etnaviv_gem_submit *submit =
container_of(kref, struct etnaviv_gem_submit, refcount);
unsigned i;
if (submit->runtime_resumed)
pm_runtime_put_autosuspend(submit->gpu->dev);
if (submit->cmdbuf.suballoc)
etnaviv_cmdbuf_free(&submit->cmdbuf);
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
/* unpin all objects */
if (submit->bos[i].flags & BO_PINNED) {
etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
atomic_dec(&etnaviv_obj->gpu_active);
submit->bos[i].mapping = NULL;
submit->bos[i].flags &= ~BO_PINNED;
}
/* if the GPU submit failed, objects might still be locked */
submit_unlock_object(submit, i);
drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
ww_acquire_fini(&submit->ticket);
if (submit->fence)
dma_fence_put(submit->fence);
wake_up_all(&submit->gpu->fence_event);
if (submit->in_fence)
dma_fence_put(submit->in_fence);
if (submit->out_fence)
dma_fence_put(submit->out_fence);
kfree(submit->pmrs);
kfree(submit);
}
void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
{
kref_put(&submit->refcount, submit_cleanup);
}
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -358,10 +401,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_etnaviv_gem_submit_pmr *pmrs;
struct drm_etnaviv_gem_submit_bo *bos;
struct etnaviv_gem_submit *submit;
struct etnaviv_cmdbuf *cmdbuf;
struct etnaviv_gpu *gpu;
struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
struct ww_acquire_ctx ticket;
int out_fence_fd = -1;
void *stream;
int ret;
@@ -399,17 +441,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
ALIGN(args->stream_size, 8) + 8,
args->nr_bos, args->nr_pmrs);
if (!bos || !relocs || !pmrs || !stream || !cmdbuf) {
if (!bos || !relocs || !pmrs || !stream) {
ret = -ENOMEM;
goto err_submit_cmds;
}
cmdbuf->exec_state = args->exec_state;
cmdbuf->ctx = file->driver_priv;
ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
args->nr_bos * sizeof(*bos));
if (ret) {
@@ -430,7 +466,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = -EFAULT;
goto err_submit_cmds;
}
cmdbuf->nr_pmrs = args->nr_pmrs;
ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
args->stream_size);
@@ -447,19 +482,28 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
submit = submit_create(dev, gpu, args->nr_bos);
ww_acquire_init(&ticket, &reservation_ww_class);
submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
if (!submit) {
ret = -ENOMEM;
goto err_submit_cmds;
goto err_submit_ww_acquire;
}
ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &submit->cmdbuf,
ALIGN(args->stream_size, 8) + 8);
if (ret)
goto err_submit_objects;
submit->cmdbuf.ctx = file->driver_priv;
submit->exec_state = args->exec_state;
submit->flags = args->flags;
ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
if (ret)
goto err_submit_objects;
ret = submit_lock_objects(submit);
ret = submit_lock_objects(submit, &ticket);
if (ret)
goto err_submit_objects;
@@ -470,21 +514,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence) {
submit->in_fence = sync_file_get_fence(args->fence_fd);
if (!submit->in_fence) {
ret = -EINVAL;
goto err_submit_objects;
}
/*
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
ret = dma_fence_wait(in_fence, true);
if (ret)
goto err_submit_objects;
}
}
ret = submit_fence_sync(submit);
@@ -493,25 +527,25 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = submit_pin_objects(submit);
if (ret)
goto out;
goto err_submit_objects;
ret = submit_reloc(submit, stream, args->stream_size / 4,
relocs, args->nr_relocs);
if (ret)
goto out;
goto err_submit_objects;
ret = submit_perfmon_validate(submit, cmdbuf, pmrs, args->nr_pmrs);
ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
if (ret)
goto out;
goto err_submit_objects;
memcpy(cmdbuf->vaddr, stream, args->stream_size);
cmdbuf->user_size = ALIGN(args->stream_size, 8);
memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
submit->cmdbuf.user_size = ALIGN(args->stream_size, 8);
ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
ret = etnaviv_gpu_submit(gpu, submit);
if (ret)
goto out;
goto err_submit_objects;
cmdbuf = NULL;
submit_attach_object_fences(submit);
if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
/*
@@ -520,39 +554,26 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
* fence to the sync file here, eliminating the ENOMEM
* possibility at this stage.
*/
sync_file = sync_file_create(submit->fence);
sync_file = sync_file_create(submit->out_fence);
if (!sync_file) {
ret = -ENOMEM;
goto out;
goto err_submit_objects;
}
fd_install(out_fence_fd, sync_file->file);
}
args->fence_fd = out_fence_fd;
args->fence = submit->fence->seqno;
out:
submit_unpin_objects(submit);
/*
* If we're returning -EAGAIN, it may be due to the userptr code
* wanting to run its workqueue outside of any locks. Flush our
* workqueue to ensure that it is run in a timely manner.
*/
if (ret == -EAGAIN)
flush_workqueue(priv->wq);
args->fence = submit->out_fence->seqno;
err_submit_objects:
if (in_fence)
dma_fence_put(in_fence);
submit_cleanup(submit);
etnaviv_submit_put(submit);
err_submit_ww_acquire:
ww_acquire_fini(&ticket);
err_submit_cmds:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
/* if we still own the cmdbuf */
if (cmdbuf)
etnaviv_cmdbuf_free(cmdbuf);
if (stream)
kvfree(stream);
if (bos)
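
One more shift in this file worth calling out: the ww_acquire_ctx moved
from the submit object onto the ioctl stack, because a refcounted submit
can outlive the ioctl while the acquire context must not. A compressed
sketch of the resulting flow, with stand-in helpers for the real
submit_lock_objects()/etnaviv_gpu_submit() pair:

#include <linux/reservation.h>
#include <linux/ww_mutex.h>

struct example_submit;	/* stand-in for struct etnaviv_gem_submit */

int example_lock_bos(struct example_submit *s, struct ww_acquire_ctx *t);
int example_queue(struct example_submit *s);	/* GPU takes its own refs */
void example_submit_put(struct example_submit *s);

static int example_submit_flow(struct example_submit *submit)
{
	struct ww_acquire_ctx ticket;
	int ret;

	ww_acquire_init(&ticket, &reservation_ww_class);

	ret = example_lock_bos(submit, &ticket);
	if (!ret)
		ret = example_queue(submit);

	/* the put may free the submit; the ticket is still ours */
	example_submit_put(submit);
	ww_acquire_fini(&ticket);
	return ret;
}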

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c

@@ -644,7 +644,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
prefetch = etnaviv_buffer_init(gpu);
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer),
prefetch);
}
@@ -717,15 +717,15 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
/* Create buffer: */
gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0, 0);
if (!gpu->buffer) {
ret = -ENOMEM;
ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer,
PAGE_SIZE);
if (ret) {
dev_err(gpu->dev, "could not create command buffer\n");
goto destroy_iommu;
}
if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) {
ret = -EINVAL;
dev_err(gpu->dev,
"command buffer outside valid memory window\n");
@@ -751,8 +751,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
return 0;
free_buffer:
etnaviv_cmdbuf_free(gpu->buffer);
gpu->buffer = NULL;
etnaviv_cmdbuf_free(&gpu->buffer);
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
@@ -958,7 +957,7 @@ static void recover_worker(struct work_struct *work)
pm_runtime_put_autosuspend(gpu->dev);
/* Retire the buffer objects in a work */
etnaviv_queue_work(gpu->drm, &gpu->retire_work);
queue_work(gpu->wq, &gpu->retire_work);
}
static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
@@ -994,7 +993,7 @@ static void hangcheck_handler(struct timer_list *t)
dev_err(gpu->dev, " completed fence: %u\n", fence);
dev_err(gpu->dev, " active fence: %u\n",
gpu->active_fence);
etnaviv_queue_work(gpu->drm, &gpu->recover_work);
queue_work(gpu->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
@@ -1201,42 +1200,23 @@ static void retire_worker(struct work_struct *work)
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
retire_work);
u32 fence = gpu->completed_fence;
struct etnaviv_cmdbuf *cmdbuf, *tmp;
unsigned int i;
struct etnaviv_gem_submit *submit, *tmp;
LIST_HEAD(retire_list);
mutex_lock(&gpu->lock);
list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
if (!dma_fence_is_signaled(cmdbuf->fence))
list_for_each_entry_safe(submit, tmp, &gpu->active_submit_list, node) {
if (!dma_fence_is_signaled(submit->out_fence))
break;
list_del(&cmdbuf->node);
dma_fence_put(cmdbuf->fence);
for (i = 0; i < cmdbuf->nr_bos; i++) {
struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
atomic_dec(&etnaviv_obj->gpu_active);
/* drop the refcount taken in etnaviv_gpu_submit */
etnaviv_gem_mapping_unreference(mapping);
}
etnaviv_cmdbuf_free(cmdbuf);
/*
* We need to balance the runtime PM count caused by
* each submission. Upon submission, we increment
* the runtime PM counter, and allocate one event.
* So here, we put the runtime PM count for each
* completed event.
*/
pm_runtime_put_autosuspend(gpu->dev);
list_move(&submit->node, &retire_list);
}
gpu->retired_fence = fence;
mutex_unlock(&gpu->lock);
wake_up_all(&gpu->fence_event);
list_for_each_entry_safe(submit, tmp, &retire_list, node)
etnaviv_submit_put(submit);
}
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
@@ -1295,41 +1275,25 @@ int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
ret = wait_event_interruptible_timeout(gpu->fence_event,
!is_active(etnaviv_obj),
remaining);
if (ret > 0) {
struct etnaviv_drm_private *priv = gpu->drm->dev_private;
/* Synchronise with the retire worker */
flush_workqueue(priv->wq);
if (ret > 0)
return 0;
} else if (ret == -ERESTARTSYS) {
else if (ret == -ERESTARTSYS)
return -ERESTARTSYS;
} else {
else
return -ETIMEDOUT;
}
}
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
{
return pm_runtime_get_sync(gpu->dev);
}
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
{
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);
}
static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
struct etnaviv_event *event, unsigned int flags)
{
const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
const struct etnaviv_gem_submit *submit = event->submit;
unsigned int i;
for (i = 0; i < cmdbuf->nr_pmrs; i++) {
const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
for (i = 0; i < submit->nr_pmrs; i++) {
const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
if (pmr->flags == flags)
etnaviv_perfmon_process(gpu, pmr);
etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
}
}
@@ -1354,14 +1318,14 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
struct etnaviv_event *event)
{
const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
const struct etnaviv_gem_submit *submit = event->submit;
unsigned int i;
u32 val;
sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
for (i = 0; i < cmdbuf->nr_pmrs; i++) {
const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
for (i = 0; i < submit->nr_pmrs; i++) {
const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
*pmr->bo_vma = pmr->sequence;
}
@@ -1380,24 +1344,15 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
struct etnaviv_gem_submit *submit)
{
struct dma_fence *fence;
unsigned int i, nr_events = 1, event[3];
int ret;
ret = etnaviv_gpu_pm_get_sync(gpu);
ret = pm_runtime_get_sync(gpu->dev);
if (ret < 0)
return ret;
/*
* TODO
*
* - flush
* - data endian
* - prefetch
*
*/
submit->runtime_resumed = true;
/*
* if there are performance monitor requests we need to have
@@ -1406,19 +1361,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
* - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
* and update the sequence number for userspace.
*/
if (cmdbuf->nr_pmrs)
if (submit->nr_pmrs)
nr_events = 3;
ret = event_alloc(gpu, nr_events, event);
if (ret) {
DRM_ERROR("no free events\n");
goto out_pm_put;
return ret;
}
mutex_lock(&gpu->lock);
fence = etnaviv_gpu_fence_alloc(gpu);
if (!fence) {
submit->out_fence = etnaviv_gpu_fence_alloc(gpu);
if (!submit->out_fence) {
for (i = 0; i < nr_events; i++)
event_free(gpu, event[i]);
@@ -1426,80 +1381,51 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
goto out_unlock;
}
gpu->event[event[0]].fence = fence;
submit->fence = dma_fence_get(fence);
gpu->active_fence = submit->fence->seqno;
gpu->active_fence = submit->out_fence->seqno;
if (gpu->lastctx != cmdbuf->ctx) {
gpu->mmu->need_flush = true;
gpu->switch_context = true;
gpu->lastctx = cmdbuf->ctx;
}
if (cmdbuf->nr_pmrs) {
if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
gpu->event[event[1]].cmdbuf = cmdbuf;
kref_get(&submit->refcount);
gpu->event[event[1]].submit = submit;
etnaviv_sync_point_queue(gpu, event[1]);
}
etnaviv_buffer_queue(gpu, event[0], cmdbuf);
kref_get(&submit->refcount);
gpu->event[event[0]].fence = submit->out_fence;
etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
&submit->cmdbuf);
if (cmdbuf->nr_pmrs) {
if (submit->nr_pmrs) {
gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
gpu->event[event[2]].cmdbuf = cmdbuf;
kref_get(&submit->refcount);
gpu->event[event[2]].submit = submit;
etnaviv_sync_point_queue(gpu, event[2]);
}
cmdbuf->fence = fence;
list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
list_add_tail(&submit->node, &gpu->active_submit_list);
/* We're committed to adding this command buffer, hold a PM reference */
pm_runtime_get_noresume(gpu->dev);
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
/* Each cmdbuf takes a refcount on the mapping */
etnaviv_gem_mapping_reference(submit->bos[i].mapping);
cmdbuf->bo_map[i] = submit->bos[i].mapping;
atomic_inc(&etnaviv_obj->gpu_active);
if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
reservation_object_add_excl_fence(etnaviv_obj->resv,
fence);
else
reservation_object_add_shared_fence(etnaviv_obj->resv,
fence);
}
cmdbuf->nr_bos = submit->nr_bos;
hangcheck_timer_reset(gpu);
ret = 0;
out_unlock:
mutex_unlock(&gpu->lock);
out_pm_put:
etnaviv_gpu_pm_put(gpu);
return ret;
}
static void etnaviv_process_sync_point(struct etnaviv_gpu *gpu,
struct etnaviv_event *event)
{
u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
event->sync_point(gpu, event);
etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}
static void sync_point_worker(struct work_struct *work)
{
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
sync_point_work);
struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
etnaviv_process_sync_point(gpu, &gpu->event[gpu->sync_point_event]);
event->sync_point(gpu, event);
etnaviv_submit_put(event->submit);
event_free(gpu, gpu->sync_point_event);
/* restart FE last to avoid GPU and IRQ racing against this worker */
etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}
/*
@@ -1550,7 +1476,7 @@ static irqreturn_t irq_handler(int irq, void *data)
if (gpu->event[event].sync_point) {
gpu->sync_point_event = event;
etnaviv_queue_work(gpu->drm, &gpu->sync_point_work);
queue_work(gpu->wq, &gpu->sync_point_work);
}
fence = gpu->event[event].fence;
@@ -1576,7 +1502,7 @@ static irqreturn_t irq_handler(int irq, void *data)
}
/* Retire the buffer objects in a work */
etnaviv_queue_work(gpu->drm, &gpu->retire_work);
queue_work(gpu->wq, &gpu->retire_work);
ret = IRQ_HANDLED;
}
@@ -1653,9 +1579,11 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
if (gpu->buffer) {
if (gpu->buffer.suballoc) {
/* Replace the last WAIT with END */
mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
mutex_unlock(&gpu->lock);
/*
* We know that only the FE is busy here, this should
@@ -1680,7 +1608,7 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
etnaviv_gpu_update_clock(gpu);
etnaviv_gpu_hw_init(gpu);
gpu->switch_context = true;
gpu->lastctx = NULL;
gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
@@ -1738,20 +1666,29 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
int ret;
if (IS_ENABLED(CONFIG_THERMAL)) {
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
(char *)dev_name(dev), gpu, &cooling_ops);
if (IS_ERR(gpu->cooling))
return PTR_ERR(gpu->cooling);
}
gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
if (!gpu->wq) {
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
thermal_cooling_device_unregister(gpu->cooling);
return -ENOMEM;
}
#ifdef CONFIG_PM
ret = pm_runtime_get_sync(gpu->dev);
#else
ret = etnaviv_gpu_clk_enable(gpu);
#endif
if (ret < 0) {
thermal_cooling_device_unregister(gpu->cooling);
destroy_workqueue(gpu->wq);
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
thermal_cooling_device_unregister(gpu->cooling);
return ret;
}
@@ -1759,7 +1696,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
gpu->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&gpu->fence_spinlock);
INIT_LIST_HEAD(&gpu->active_cmd_list);
INIT_LIST_HEAD(&gpu->active_submit_list);
INIT_WORK(&gpu->retire_work, retire_worker);
INIT_WORK(&gpu->sync_point_work, sync_point_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
@@ -1784,6 +1721,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
hangcheck_disable(gpu);
flush_workqueue(gpu->wq);
destroy_workqueue(gpu->wq);
#ifdef CONFIG_PM
pm_runtime_get_sync(gpu->dev);
pm_runtime_put_sync_suspend(gpu->dev);
@@ -1791,10 +1731,8 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
etnaviv_gpu_hw_suspend(gpu);
#endif
if (gpu->buffer) {
etnaviv_cmdbuf_free(gpu->buffer);
gpu->buffer = NULL;
}
if (gpu->buffer.suballoc)
etnaviv_cmdbuf_free(&gpu->buffer);
if (gpu->cmdbuf_suballoc) {
etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
@@ -1808,7 +1746,8 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
gpu->drm = NULL;
thermal_cooling_device_unregister(gpu->cooling);
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
thermal_cooling_device_unregister(gpu->cooling);
gpu->cooling = NULL;
}
@@ -1931,7 +1870,7 @@ static int etnaviv_gpu_rpm_resume(struct device *dev)
return ret;
/* Re-initialise the basic hardware state */
if (gpu->drm && gpu->buffer) {
if (gpu->drm && gpu->buffer.suballoc) {
ret = etnaviv_gpu_hw_resume(gpu);
if (ret) {
etnaviv_gpu_clk_disable(gpu);

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h

@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
struct etnaviv_gem_submit;
@@ -89,7 +90,7 @@ struct etnaviv_chip_identity {
struct etnaviv_event {
struct dma_fence *fence;
struct etnaviv_cmdbuf *cmdbuf;
struct etnaviv_gem_submit *submit;
void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};
@@ -106,10 +107,10 @@ struct etnaviv_gpu {
struct mutex lock;
struct etnaviv_chip_identity identity;
struct etnaviv_file_private *lastctx;
bool switch_context;
struct workqueue_struct *wq;
/* 'ring'-buffer: */
struct etnaviv_cmdbuf *buffer;
struct etnaviv_cmdbuf buffer;
int exec_state;
/* bus base address of memory */
@@ -122,7 +123,7 @@ struct etnaviv_gpu {
spinlock_t event_spinlock;
/* list of currently in-flight command buffers */
struct list_head active_cmd_list;
struct list_head active_submit_list;
u32 idle_mask;
@@ -202,7 +203,7 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c

@@ -70,9 +70,8 @@ static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
return -ENOMEM;
}
for (i = 0; i < PT_ENTRIES; i++)
etnaviv_domain->pgtable_cpu[i] =
etnaviv_domain->base.bad_page_dma;
memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
PT_ENTRIES);
return 0;
}

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c

@@ -229,7 +229,7 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)etnaviv_domain->mtlb_dma,
(u32)etnaviv_domain->base.bad_page_dma);
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c

@@ -263,18 +263,16 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
mapping->iova = iova;
list_add_tail(&mapping->mmu_node, &mmu->mappings);
mutex_unlock(&mmu->lock);
return 0;
ret = 0;
goto unlock;
}
}
node = &mapping->vram_node;
ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
if (ret < 0) {
mutex_unlock(&mmu->lock);
return ret;
}
if (ret < 0)
goto unlock;
mmu->last_iova = node->start + etnaviv_obj->base.size;
mapping->iova = node->start;
@@ -283,12 +281,12 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
if (ret < 0) {
drm_mm_remove_node(node);
mutex_unlock(&mmu->lock);
return ret;
goto unlock;
}
list_add_tail(&mapping->mmu_node, &mmu->mappings);
mmu->need_flush = true;
unlock:
mutex_unlock(&mmu->lock);
return ret;

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c

@@ -479,9 +479,9 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
}
void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
const struct etnaviv_perfmon_request *pmr)
const struct etnaviv_perfmon_request *pmr, u32 exec_state)
{
const struct etnaviv_pm_domain_meta *meta = &doms_meta[gpu->exec_state];
const struct etnaviv_pm_domain_meta *meta = &doms_meta[exec_state];
const struct etnaviv_pm_domain *dom;
const struct etnaviv_pm_signal *sig;
u32 *bo = pmr->bo_vma;

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h

@@ -44,6 +44,6 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
u32 exec_state);
void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
const struct etnaviv_perfmon_request *pmr);
const struct etnaviv_perfmon_request *pmr, u32 exec_state);
#endif /* __ETNAVIV_PERFMON_H__ */