drm/amdgpu: move VM table mapping into the backend as well
Clean that up further and also fix another case where the BO wasn't
kmapped for CPU based updates.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit ecf96b52bf
parent df399b0641
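This patch completes the VM update backend abstraction: instead of each call site branching on vm->use_cpu_for_update to decide between kmapping the BO and binding it into the GART, the decision now lives behind the per-VM function table vm->update_funcs. The stand-alone program below is a minimal sketch of that dispatch pattern only; every toy_* name is hypothetical and merely stands in for the real amdgpu structures named in the comments.

	/*
	 * Minimal sketch of the vm->update_funcs dispatch pattern.
	 * All toy_* names are hypothetical. Compile with: cc sketch.c
	 */
	#include <stdio.h>
	#include <stdbool.h>

	struct toy_bo {			/* stand-in for struct amdgpu_bo */
		bool kmapped;		/* CPU backend: kernel mapping done */
		bool in_gart;		/* SDMA backend: GART mapping done */
	};

	struct toy_update_funcs {	/* shape of struct amdgpu_vm_update_funcs */
		int (*map_table)(struct toy_bo *bo);
	};

	/* stands in for amdgpu_vm_cpu_map_table() -> amdgpu_bo_kmap() */
	static int toy_cpu_map_table(struct toy_bo *bo)
	{
		bo->kmapped = true;
		return 0;
	}

	/* stands in for amdgpu_vm_sdma_map_table() -> amdgpu_ttm_alloc_gart() */
	static int toy_sdma_map_table(struct toy_bo *bo)
	{
		bo->in_gart = true;
		return 0;
	}

	static const struct toy_update_funcs toy_cpu_funcs = {
		.map_table = toy_cpu_map_table,
	};
	static const struct toy_update_funcs toy_sdma_funcs = {
		.map_table = toy_sdma_map_table,
	};

	struct toy_vm {
		bool use_cpu_for_update;
		const struct toy_update_funcs *update_funcs;
	};

	int main(void)
	{
		struct toy_vm vm = { .use_cpu_for_update = false };
		struct toy_bo bo = { 0 };

		/* Backend is chosen once; later call sites stay branch-free. */
		vm.update_funcs = vm.use_cpu_for_update ? &toy_cpu_funcs
							: &toy_sdma_funcs;

		int r = vm.update_funcs->map_table(&bo);

		printf("r=%d kmapped=%d in_gart=%d\n", r, bo.kmapped, bo.in_gart);
		return r;
	}

The payoff is visible in the hunks below: amdgpu_vm_validate_pt_bos() and amdgpu_vm_clear_bo() shrink to a single map_table() call, and the clear path now kmaps the BO for CPU based updates, which the old code missed.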
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -659,17 +659,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		if (bo->tbo.type != ttm_bo_type_kernel) {
 			amdgpu_vm_bo_moved(bo_base);
 		} else {
-			if (vm->use_cpu_for_update)
-				r = amdgpu_bo_kmap(bo, NULL);
-			else
-				r = amdgpu_ttm_alloc_gart(&bo->tbo);
-			if (r)
-				break;
-			if (bo->shadow) {
-				r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
-				if (r)
-					break;
-			}
+			vm->update_funcs->map_table(bo);
 			amdgpu_vm_bo_relocated(bo_base);
 		}
 	}
@@ -751,22 +741,17 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	r = amdgpu_ttm_alloc_gart(&bo->tbo);
-	if (r)
-		return r;
-
 	if (bo->shadow) {
 		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
 				    &ctx);
 		if (r)
 			return r;
-
-		r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
-		if (r)
-			return r;
-
 	}
 
+	r = vm->update_funcs->map_table(bo);
+	if (r)
+		return r;
+
 	memset(&params, 0, sizeof(params));
 	params.adev = adev;
 	params.vm = vm;
@@ -877,12 +862,6 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	if (vm->use_cpu_for_update) {
-		r = amdgpu_bo_kmap(pt, NULL);
-		if (r)
-			goto error_free_pt;
-	}
-
 	/* Keep a reference to the root directory to avoid
 	 * freeing them up in the wrong order.
 	 */
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -215,6 +215,7 @@ struct amdgpu_vm_update_params {
 };
 
 struct amdgpu_vm_update_funcs {
+	int (*map_table)(struct amdgpu_bo *bo);
 	int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
 		       struct dma_fence *exclusive);
 	int (*update)(struct amdgpu_vm_update_params *p,
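With map_table added to amdgpu_vm_update_funcs, the backend chosen once at VM init now covers table mapping as well. For context, the selection elsewhere in amdgpu_vm.c looks roughly like the paraphrase below (not part of this diff):

	if (vm->use_cpu_for_update)
		vm->update_funcs = &amdgpu_vm_cpu_funcs;
	else
		vm->update_funcs = &amdgpu_vm_sdma_funcs;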
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -24,6 +24,16 @@
 #include "amdgpu_object.h"
 #include "amdgpu_trace.h"
 
+/**
+ * amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
+{
+	return amdgpu_bo_kmap(table, NULL);
+}
+
 /**
  * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
  *
@@ -110,6 +120,7 @@ static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
 }
 
 const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
+	.map_table = amdgpu_vm_cpu_map_table,
 	.prepare = amdgpu_vm_cpu_prepare,
 	.update = amdgpu_vm_cpu_update,
 	.commit = amdgpu_vm_cpu_commit
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -28,6 +28,25 @@
 #define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
 #define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)
 
+/**
+ * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
+{
+	int r;
+
+	r = amdgpu_ttm_alloc_gart(&table->tbo);
+	if (r)
+		return r;
+
+	if (table->shadow)
+		r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);
+
+	return r;
+}
+
 /**
  * amdgpu_vm_sdma_prepare - prepare SDMA command submission
  *
@@ -242,6 +261,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 }
 
 const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
+	.map_table = amdgpu_vm_sdma_map_table,
 	.prepare = amdgpu_vm_sdma_prepare,
 	.update = amdgpu_vm_sdma_update,
 	.commit = amdgpu_vm_sdma_commit