drm/gpuvm: rename struct drm_gpuva_manager to struct drm_gpuvm
Rename struct drm_gpuva_manager to struct drm_gpuvm including corresponding
functions. This way the GPUVA manager's structures align very well with the
documentation of VM_BIND [1] and VM_BIND locking [2]. It also provides a
better foundation for the naming of data structures and functions introduced
for implementing a common dma-resv per GPU-VM including tracking of external
and evicted objects in subsequent patches.

[1] Documentation/gpu/drm-vm-bind-async.rst
[2] Documentation/gpu/drm-vm-bind-locking.rst

Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Acked-by: Dave Airlie <airlied@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230920144343.64830-2-dakr@redhat.com
parent 89755ee1d5
commit f72c2db470
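
For reference, the renames this patch applies throughout, collected from the
hunks below:

    struct drm_gpuva_manager           ->  struct drm_gpuvm
    struct drm_gpuva_fn_ops            ->  struct drm_gpuvm_ops
    drm_gpuva_manager_init()           ->  drm_gpuvm_init()
    drm_gpuva_manager_destroy()        ->  drm_gpuvm_destroy()
    drm_gpuva_interval_empty()         ->  drm_gpuvm_interval_empty()
    drm_gpuva_for_each_va*()           ->  drm_gpuvm_for_each_va*()
    drm_gpuva_sm_map()/_sm_unmap()     ->  drm_gpuvm_sm_map()/_sm_unmap()
    drm_gpuva_sm_map_ops_create()      ->  drm_gpuvm_sm_map_ops_create()
    drm_gpuva_sm_unmap_ops_create()    ->  drm_gpuvm_sm_unmap_ops_create()
    drm_gpuva_prefetch_ops_create()    ->  drm_gpuvm_prefetch_ops_create()
    drm_gpuva_gem_unmap_ops_create()   ->  drm_gpuvm_gem_unmap_ops_create()
    drm_gpuva->mgr                     ->  drm_gpuva->vm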
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -45,7 +45,7 @@ drm-y := \
 	drm_vblank.o \
 	drm_vblank_work.o \
 	drm_vma_manager.o \
-	drm_gpuva_mgr.o \
+	drm_gpuvm.o \
 	drm_writeback.o
 
 drm-$(CONFIG_DRM_LEGACY) += \
 	drm_agpsupport.o \
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -40,7 +40,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_managed.h>
-#include <drm/drm_gpuva_mgr.h>
+#include <drm/drm_gpuvm.h>
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
@@ -189,31 +189,31 @@ static const struct file_operations drm_debugfs_fops = {
 /**
  * drm_debugfs_gpuva_info - dump the given DRM GPU VA space
  * @m: pointer to the &seq_file to write
- * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
  *
  * Dumps the GPU VA mappings of a given DRM GPU VA manager.
  *
  * For each DRM GPU VA space drivers should call this function from their
  * &drm_info_list's show callback.
  *
- * Returns: 0 on success, -ENODEV if the &mgr is not initialized
+ * Returns: 0 on success, -ENODEV if the &gpuvm is not initialized
  */
 int drm_debugfs_gpuva_info(struct seq_file *m,
-			   struct drm_gpuva_manager *mgr)
+			   struct drm_gpuvm *gpuvm)
 {
-	struct drm_gpuva *va, *kva = &mgr->kernel_alloc_node;
+	struct drm_gpuva *va, *kva = &gpuvm->kernel_alloc_node;
 
-	if (!mgr->name)
+	if (!gpuvm->name)
 		return -ENODEV;
 
 	seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n",
-		   mgr->name, mgr->mm_start, mgr->mm_start + mgr->mm_range);
+		   gpuvm->name, gpuvm->mm_start, gpuvm->mm_start + gpuvm->mm_range);
 	seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n",
 		   kva->va.addr, kva->va.addr + kva->va.range);
 	seq_puts(m, "\n");
 	seq_puts(m, " VAs | start | range | end | object | object offset\n");
 	seq_puts(m, "-------------------------------------------------------------------------------------------------------------\n");
-	drm_gpuva_for_each_va(va, mgr) {
+	drm_gpuvm_for_each_va(va, gpuvm) {
 		if (unlikely(va == kva))
 			continue;
 
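As the kernel-doc above notes, drivers expose this dump by calling
drm_debugfs_gpuva_info() from a &drm_info_list show callback; the
DRM_DEBUGFS_GPUVA_INFO helper in drm_debugfs.h further down builds such an
entry. A minimal, hypothetical sketch under the renamed API (struct my_vm,
its lock, and the data lookup are illustrative, not part of this patch):

    /* struct my_vm: hypothetical driver VM embedding a struct drm_gpuvm
     * member named "gpuvm", protected by a driver-side mutex "lock".
     */
    static int my_vm_gpuvas_show(struct seq_file *m, void *data)
    {
    	struct drm_info_node *node = m->private;
    	struct my_vm *vm = node->info_ent->data;	/* illustrative lookup */
    	int ret;

    	mutex_lock(&vm->lock);		/* whatever lock protects the VA space */
    	ret = drm_debugfs_gpuva_info(m, &vm->gpuvm);
    	mutex_unlock(&vm->lock);

    	return ret;
    }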
--- a/drivers/gpu/drm/drm_gpuva_mgr.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
(file renamed; diff suppressed because it is too large)
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -106,7 +106,7 @@ nouveau_exec_job_submit(struct nouveau_job *job)
 	drm_exec_until_all_locked(exec) {
 		struct drm_gpuva *va;
 
-		drm_gpuva_for_each_va(va, &uvmm->umgr) {
+		drm_gpuvm_for_each_va(va, &uvmm->umgr) {
 			if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
 				continue;
 
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -329,7 +329,7 @@ nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
 	struct nouveau_uvma_region *reg;
 	int ret;
 
-	if (!drm_gpuva_interval_empty(&uvmm->umgr, addr, range))
+	if (!drm_gpuvm_interval_empty(&uvmm->umgr, addr, range))
 		return -ENOSPC;
 
 	ret = nouveau_uvma_region_alloc(&reg);
@@ -384,7 +384,7 @@ nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
 {
 	struct nouveau_uvmm *uvmm = reg->uvmm;
 
-	return drm_gpuva_interval_empty(&uvmm->umgr,
+	return drm_gpuvm_interval_empty(&uvmm->umgr,
 					reg->va.addr,
 					reg->va.range);
 }
@@ -444,7 +444,7 @@ op_map_prepare_unwind(struct nouveau_uvma *uvma)
 static void
 op_unmap_prepare_unwind(struct drm_gpuva *va)
 {
-	drm_gpuva_insert(va->mgr, va);
+	drm_gpuva_insert(va->vm, va);
 }
 
 static void
@@ -1194,7 +1194,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
 				goto unwind_continue;
 			}
 
-			op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+			op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->umgr,
 								op->va.addr,
 								op->va.range);
 			if (IS_ERR(op->ops)) {
@@ -1240,7 +1240,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
 				}
 			}
 
-			op->ops = drm_gpuva_sm_map_ops_create(&uvmm->umgr,
+			op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->umgr,
 							      op->va.addr,
 							      op->va.range,
 							      op->gem.obj,
@@ -1264,7 +1264,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
 			break;
 		}
 		case OP_UNMAP:
-			op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+			op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->umgr,
 								op->va.addr,
 								op->va.range);
 			if (IS_ERR(op->ops)) {
@@ -1836,11 +1836,11 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
 	uvmm->kernel_managed_addr = kernel_managed_addr;
 	uvmm->kernel_managed_size = kernel_managed_size;
 
-	drm_gpuva_manager_init(&uvmm->umgr, cli->name,
-			       NOUVEAU_VA_SPACE_START,
-			       NOUVEAU_VA_SPACE_END,
-			       kernel_managed_addr, kernel_managed_size,
-			       NULL);
+	drm_gpuvm_init(&uvmm->umgr, cli->name,
+		       NOUVEAU_VA_SPACE_START,
+		       NOUVEAU_VA_SPACE_END,
+		       kernel_managed_addr, kernel_managed_size,
+		       NULL);
 
 	ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
 			    cli->vmm.vmm.object.oclass, RAW,
@@ -1855,7 +1855,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
 	return 0;
 
 out_free_gpuva_mgr:
-	drm_gpuva_manager_destroy(&uvmm->umgr);
+	drm_gpuvm_destroy(&uvmm->umgr);
 out_unlock:
 	mutex_unlock(&cli->mutex);
 	return ret;
@@ -1877,7 +1877,7 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
 	wait_event(entity->job.wq, list_empty(&entity->job.list.head));
 
 	nouveau_uvmm_lock(uvmm);
-	drm_gpuva_for_each_va_safe(va, next, &uvmm->umgr) {
+	drm_gpuvm_for_each_va_safe(va, next, &uvmm->umgr) {
 		struct nouveau_uvma *uvma = uvma_from_va(va);
 		struct drm_gem_object *obj = va->gem.obj;
 
@@ -1910,7 +1910,7 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
 
 	mutex_lock(&cli->mutex);
 	nouveau_vmm_fini(&uvmm->vmm);
-	drm_gpuva_manager_destroy(&uvmm->umgr);
+	drm_gpuvm_destroy(&uvmm->umgr);
 	mutex_unlock(&cli->mutex);
 
 	dma_resv_fini(&uvmm->resv);
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -3,13 +3,13 @@
 #ifndef __NOUVEAU_UVMM_H__
 #define __NOUVEAU_UVMM_H__
 
-#include <drm/drm_gpuva_mgr.h>
+#include <drm/drm_gpuvm.h>
 
 #include "nouveau_drv.h"
 
 struct nouveau_uvmm {
 	struct nouveau_vmm vmm;
-	struct drm_gpuva_manager umgr;
+	struct drm_gpuvm umgr;
 	struct maple_tree region_mt;
 	struct mutex mutex;
 	struct dma_resv resv;
@@ -44,7 +44,7 @@ struct nouveau_uvma {
 #define uvmm_from_mgr(x) container_of((x), struct nouveau_uvmm, umgr)
#define uvma_from_va(x) container_of((x), struct nouveau_uvma, va)
 
-#define to_uvmm(x) uvmm_from_mgr((x)->va.mgr)
+#define to_uvmm(x) uvmm_from_mgr((x)->va.vm)
 
 struct nouveau_uvmm_bind_job {
 	struct nouveau_job base;
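The nouveau header above shows the embedding pattern the GPUVA manager
expects: the &drm_gpuvm (here, umgr) lives inside a larger driver structure
and is recovered via container_of(). A generic, hypothetical sketch of the
same pattern (struct my_vm and its field names are illustrative):

    #include <drm/drm_gpuvm.h>

    struct my_vm {
    	struct drm_gpuvm gpuvm;	/* embedded, like nouveau's umgr */
    	struct mutex lock;	/* driver-side VA space lock */
    	/* ... more driver state ... */
    };

    /* Recover the driver VM from the embedded drm_gpuvm, nouveau-style. */
    #define to_my_vm(p)	container_of((p), struct my_vm, gpuvm)

    /* Or from a mapping, through the renamed va->vm back-pointer. */
    #define va_to_my_vm(va)	to_my_vm((va)->vm)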
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -35,7 +35,7 @@
 #include <linux/types.h>
 #include <linux/seq_file.h>
 
-#include <drm/drm_gpuva_mgr.h>
+#include <drm/drm_gpuvm.h>
 
 /**
  * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space
@@ -152,7 +152,7 @@ void drm_debugfs_add_files(struct drm_device *dev,
 			   const struct drm_debugfs_info *files, int count);
 
 int drm_debugfs_gpuva_info(struct seq_file *m,
-			   struct drm_gpuva_manager *mgr);
+			   struct drm_gpuvm *gpuvm);
 #else
 static inline void drm_debugfs_create_files(const struct drm_info_list *files,
 					    int count, struct dentry *root,
@@ -177,7 +177,7 @@ static inline void drm_debugfs_add_files(struct drm_device *dev,
 {}
 
 static inline int drm_debugfs_gpuva_info(struct seq_file *m,
-					 struct drm_gpuva_manager *mgr)
+					 struct drm_gpuvm *gpuvm)
 {
 	return 0;
 }
--- a/include/drm/drm_gpuva_mgr.h
+++ b/include/drm/drm_gpuvm.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 
-#ifndef __DRM_GPUVA_MGR_H__
-#define __DRM_GPUVA_MGR_H__
+#ifndef __DRM_GPUVM_H__
+#define __DRM_GPUVM_H__
 
 /*
  * Copyright (c) 2022 Red Hat.
@@ -31,8 +31,8 @@
 
 #include <drm/drm_gem.h>
 
-struct drm_gpuva_manager;
-struct drm_gpuva_fn_ops;
+struct drm_gpuvm;
+struct drm_gpuvm_ops;
 
 /**
  * enum drm_gpuva_flags - flags for struct drm_gpuva
@@ -62,15 +62,15 @@ enum drm_gpuva_flags {
  * struct drm_gpuva - structure to track a GPU VA mapping
  *
  * This structure represents a GPU VA mapping and is associated with a
- * &drm_gpuva_manager.
+ * &drm_gpuvm.
  *
  * Typically, this structure is embedded in bigger driver structures.
  */
 struct drm_gpuva {
 	/**
-	 * @mgr: the &drm_gpuva_manager this object is associated with
+	 * @vm: the &drm_gpuvm this object is associated with
 	 */
-	struct drm_gpuva_manager *mgr;
+	struct drm_gpuvm *vm;
 
 	/**
 	 * @flags: the &drm_gpuva_flags for this mapping
@@ -137,20 +137,18 @@ struct drm_gpuva {
 	} rb;
 };
 
-int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va);
+int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
 void drm_gpuva_remove(struct drm_gpuva *va);
 
 void drm_gpuva_link(struct drm_gpuva *va);
 void drm_gpuva_unlink(struct drm_gpuva *va);
 
-struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr,
+struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
 				 u64 addr, u64 range);
-struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
+struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
 				       u64 addr, u64 range);
-struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start);
-struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end);
-
-bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
+struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
 
 static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
 				  struct drm_gem_object *obj, u64 offset)
@@ -186,7 +184,7 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
 }
 
 /**
- * struct drm_gpuva_manager - DRM GPU VA Manager
+ * struct drm_gpuvm - DRM GPU VA Manager
  *
  * The DRM GPU VA Manager keeps track of a GPU's virtual address space by using
  * &maple_tree structures. Typically, this structure is embedded in bigger
@@ -197,7 +195,7 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
 *
 * There should be one manager instance per GPU virtual address space.
 */
-struct drm_gpuva_manager {
+struct drm_gpuvm {
 	/**
 	 * @name: the name of the DRM GPU VA space
 	 */
@@ -237,100 +235,101 @@ struct drm_gpuva_manager {
 	struct drm_gpuva kernel_alloc_node;
 
 	/**
-	 * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers
+	 * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
 	 */
-	const struct drm_gpuva_fn_ops *ops;
+	const struct drm_gpuvm_ops *ops;
 };
 
-void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
-			    const char *name,
-			    u64 start_offset, u64 range,
-			    u64 reserve_offset, u64 reserve_range,
-			    const struct drm_gpuva_fn_ops *ops);
-void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr);
+void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+		    u64 start_offset, u64 range,
+		    u64 reserve_offset, u64 reserve_range,
+		    const struct drm_gpuvm_ops *ops);
+void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm);
+
+bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
 
 static inline struct drm_gpuva *
 __drm_gpuva_next(struct drm_gpuva *va)
 {
-	if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list))
+	if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
 		return list_next_entry(va, rb.entry);
 
 	return NULL;
 }
 
 /**
- * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas
+ * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
  * @va__: &drm_gpuva structure to assign to in each iteration step
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
  * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 *         overlap)
 *
- * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
- * but is using the &drm_gpuva_manager's internal interval tree to accelerate
+ * but is using the &drm_gpuvm's internal interval tree to accelerate
 * the search for the starting &drm_gpuva, and hence isn't safe against removal
 * of elements. It assumes that @end__ is within (or is the upper limit of) the
- * &drm_gpuva_manager. This iterator does not skip over the &drm_gpuva_manager's
+ * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
 * @kernel_alloc_node.
 */
-#define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \
-	for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \
+#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
+	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
 	     va__ && (va__->va.addr < (end__)); \
 	     va__ = __drm_gpuva_next(va__))
 
 /**
- * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of
+ * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
 * &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 *         overlap)
 *
- * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to
- * list_for_each_safe(), but is using the &drm_gpuva_manager's internal interval
+ * list_for_each_safe(), but is using the &drm_gpuvm's internal interval
 * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
 * against removal of elements. It assumes that @end__ is within (or is the
- * upper limit of) the &drm_gpuva_manager. This iterator does not skip over the
- * &drm_gpuva_manager's @kernel_alloc_node.
+ * upper limit of) the &drm_gpuvm. This iterator does not skip over the
+ * &drm_gpuvm's @kernel_alloc_node.
 */
-#define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \
-	for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \
+#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
+	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
 	     next__ = __drm_gpuva_next(va__); \
 	     va__ && (va__->va.addr < (end__)); \
 	     va__ = next__, next__ = __drm_gpuva_next(va__))
 
 /**
- * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas
+ * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
- * &drm_gpuva_manager.
+ * &drm_gpuvm.
 */
-#define drm_gpuva_for_each_va(va__, mgr__) \
-	list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry)
+#define drm_gpuvm_for_each_va(va__, gpuvm__) \
+	list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)
 
 /**
- * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas
+ * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
- * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and
+ * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
 * hence safe against the removal of elements.
 */
-#define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \
-	list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry)
+#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
+	list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
 
 /**
 * enum drm_gpuva_op_type - GPU VA operation type
 *
- * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager.
+ * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
 */
 enum drm_gpuva_op_type {
 	/**
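For drivers, the renamed iterators read as below; a hedged sketch with a
hypothetical gpuvm pointer (mind the kernel-doc caveats above: the plain and
range variants are not safe against removal, and none of them skip the
&kernel_alloc_node):

    struct drm_gpuva *va, *next;

    /* Walk every mapping in the VA space (not removal-safe). */
    drm_gpuvm_for_each_va(va, gpuvm)
    	pr_info("va: [0x%016llx; 0x%016llx]\n",
    		va->va.addr, va->va.addr + va->va.range);

    /* Walk only the mappings overlapping a given range. */
    drm_gpuvm_for_each_va_range(va, gpuvm, 0x100000, 0x200000)
    	; /* inspect va */

    /* Removal-safe walk, as nouveau_uvmm_fini() uses above. */
    drm_gpuvm_for_each_va_safe(va, next, gpuvm)
    	drm_gpuva_remove(va);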
@@ -413,7 +412,7 @@ struct drm_gpuva_op_unmap {
 	 *
 	 * Optionally, if &keep is set, drivers may keep the actual page table
 	 * mappings for this &drm_gpuva, adding the missing page table entries
-	 * only and update the &drm_gpuva_manager accordingly.
+	 * only and update the &drm_gpuvm accordingly.
 	 */
 	bool keep;
 };
@@ -584,22 +583,22 @@ struct drm_gpuva_ops {
 #define drm_gpuva_next_op(op) list_next_entry(op, entry)
 
 struct drm_gpuva_ops *
-drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
 			    u64 addr, u64 range,
 			    struct drm_gem_object *obj, u64 offset);
 struct drm_gpuva_ops *
-drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
 			      u64 addr, u64 range);
 
 struct drm_gpuva_ops *
-drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
 			      u64 addr, u64 range);
 
 struct drm_gpuva_ops *
-drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
 			       struct drm_gem_object *obj);
 
-void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
+void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
 			struct drm_gpuva_ops *ops);
 
 static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
@@ -610,15 +609,15 @@ static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
 }
 
 /**
- * struct drm_gpuva_fn_ops - callbacks for split/merge steps
+ * struct drm_gpuvm_ops - callbacks for split/merge steps
 *
- * This structure defines the callbacks used by &drm_gpuva_sm_map and
- * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap
+ * This structure defines the callbacks used by &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
 * operations to drivers.
 */
-struct drm_gpuva_fn_ops {
+struct drm_gpuvm_ops {
 	/**
-	 * @op_alloc: called when the &drm_gpuva_manager allocates
+	 * @op_alloc: called when the &drm_gpuvm allocates
 	 * a struct drm_gpuva_op
 	 *
 	 * Some drivers may want to embed struct drm_gpuva_op into driver
@@ -630,7 +629,7 @@ struct drm_gpuva_fn_ops {
 	struct drm_gpuva_op *(*op_alloc)(void);
 
 	/**
-	 * @op_free: called when the &drm_gpuva_manager frees a
+	 * @op_free: called when the &drm_gpuvm frees a
 	 * struct drm_gpuva_op
 	 *
 	 * Some drivers may want to embed struct drm_gpuva_op into driver
@@ -642,19 +641,19 @@ struct drm_gpuva_fn_ops {
 	void (*op_free)(struct drm_gpuva_op *op);
 
 	/**
-	 * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the
+	 * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
 	 * mapping once all previous steps were completed
 	 *
 	 * The &priv pointer matches the one the driver passed to
-	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
 	 *
-	 * Can be NULL if &drm_gpuva_sm_map is used.
+	 * Can be NULL if &drm_gpuvm_sm_map is used.
 	 */
 	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
 
 	/**
-	 * @sm_step_remap: called from &drm_gpuva_sm_map and
-	 * &drm_gpuva_sm_unmap to split up an existent mapping
+	 * @sm_step_remap: called from &drm_gpuvm_sm_map and
+	 * &drm_gpuvm_sm_unmap to split up an existent mapping
 	 *
 	 * This callback is called when existent mapping needs to be split up.
 	 * This is the case when either a newly requested mapping overlaps or
@@ -662,38 +661,38 @@ struct drm_gpuva_fn_ops {
 	 * mapping is requested.
 	 *
 	 * The &priv pointer matches the one the driver passed to
-	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
 	 *
-	 * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
 	 * used.
 	 */
 	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
 
 	/**
-	 * @sm_step_unmap: called from &drm_gpuva_sm_map and
-	 * &drm_gpuva_sm_unmap to unmap an existent mapping
+	 * @sm_step_unmap: called from &drm_gpuvm_sm_map and
+	 * &drm_gpuvm_sm_unmap to unmap an existent mapping
 	 *
 	 * This callback is called when existent mapping needs to be unmapped.
 	 * This is the case when either a newly requested mapping encloses an
 	 * existent mapping or an unmap of an existent mapping is requested.
 	 *
 	 * The &priv pointer matches the one the driver passed to
-	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
 	 *
-	 * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
 	 * used.
 	 */
 	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
 };
 
-int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
+int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
 		     u64 addr, u64 range,
 		     struct drm_gem_object *obj, u64 offset);
 
-int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
+int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
 		       u64 addr, u64 range);
 
-void drm_gpuva_map(struct drm_gpuva_manager *mgr,
+void drm_gpuva_map(struct drm_gpuvm *gpuvm,
 		   struct drm_gpuva *va,
 		   struct drm_gpuva_op_map *op);
 
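Taken together, a driver wires the split/merge machinery roughly as follows.
This is a sketch under assumed names (my_*, MY_VA_*), not code from this
patch; error handling and the actual page-table work are elided:

    static int my_sm_step_map(struct drm_gpuva_op *op, void *priv)
    {
    	/* program the page tables for op->map, insert the new drm_gpuva */
    	return 0;
    }

    static int my_sm_step_remap(struct drm_gpuva_op *op, void *priv)
    {
    	/* split the old mapping along op->remap.prev/next/unmap */
    	return 0;
    }

    static int my_sm_step_unmap(struct drm_gpuva_op *op, void *priv)
    {
    	/* tear down the page tables behind op->unmap.va */
    	return 0;
    }

    static const struct drm_gpuvm_ops my_vm_ops = {
    	.sm_step_map	= my_sm_step_map,
    	.sm_step_remap	= my_sm_step_remap,
    	.sm_step_unmap	= my_sm_step_unmap,
    };

    static void my_vm_setup(struct my_vm *vm)
    {
    	/* assumption: a zero reserve_range skips the kernel-reserved node */
    	drm_gpuvm_init(&vm->gpuvm, "my-vm", MY_VA_START, MY_VA_RANGE,
    		       0, 0, &my_vm_ops);
    }

    /* A later map request then drives the sm_step_* callbacks: */
    /* err = drm_gpuvm_sm_map(&vm->gpuvm, vm, addr, range, obj, offset); */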
@@ -703,4 +702,4 @@ void drm_gpuva_remap(struct drm_gpuva *prev,
 
 void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
 
-#endif /* __DRM_GPUVA_MGR_H__ */
+#endif /* __DRM_GPUVM_H__ */