drm/amdgpu: cleanup PTE flag generation v3
Move the ASIC specific code into a new callback function. v2: mask the flags for SI and CIK instead of a BUG_ON(). v3: remove last missed BUG_ON(). Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com> Reviewed-by: Huang Rui <ray.huang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in: parent 71776b6dae · commit cbfae36cea
@ -104,6 +104,10 @@ struct amdgpu_gmc_funcs {
|
|||||||
/* get the pde for a given mc addr */
|
/* get the pde for a given mc addr */
|
||||||
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
|
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
|
||||||
u64 *dst, u64 *flags);
|
u64 *dst, u64 *flags);
|
||||||
|
/* get the pte flags to use for a BO VA mapping */
|
||||||
|
void (*get_vm_pte)(struct amdgpu_device *adev,
|
||||||
|
struct amdgpu_bo_va_mapping *mapping,
|
||||||
|
uint64_t *flags);
|
||||||
};
|
};
|
||||||
|
|
||||||
struct amdgpu_xgmi {
|
struct amdgpu_xgmi {
|
||||||
@ -185,6 +189,7 @@ struct amdgpu_gmc {
|
|||||||
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
|
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
|
||||||
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
|
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
|
||||||
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
|
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
|
||||||
|
#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
|
* amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
|
||||||
|
@ -1571,33 +1571,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
|
|||||||
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
|
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
|
||||||
flags &= ~AMDGPU_PTE_WRITEABLE;
|
flags &= ~AMDGPU_PTE_WRITEABLE;
|
||||||
|
|
||||||
if (adev->asic_type >= CHIP_TONGA) {
|
/* Apply ASIC specific mapping flags */
|
||||||
flags &= ~AMDGPU_PTE_EXECUTABLE;
|
amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
|
||||||
flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (adev->asic_type >= CHIP_NAVI10) {
|
|
||||||
flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
|
|
||||||
flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
|
|
||||||
} else {
|
|
||||||
flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
|
|
||||||
flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
|
|
||||||
}
|
|
||||||
|
|
||||||
if ((mapping->flags & AMDGPU_PTE_PRT) &&
|
|
||||||
(adev->asic_type >= CHIP_VEGA10)) {
|
|
||||||
flags |= AMDGPU_PTE_PRT;
|
|
||||||
if (adev->asic_type >= CHIP_NAVI10) {
|
|
||||||
flags |= AMDGPU_PTE_SNOOPED;
|
|
||||||
flags |= AMDGPU_PTE_LOG;
|
|
||||||
flags |= AMDGPU_PTE_SYSTEM;
|
|
||||||
}
|
|
||||||
flags &= ~AMDGPU_PTE_VALID;
|
|
||||||
}
|
|
||||||
if (adev->asic_type == CHIP_ARCTURUS &&
|
|
||||||
!(flags & AMDGPU_PTE_SYSTEM) &&
|
|
||||||
mapping->bo_va->is_xgmi)
|
|
||||||
flags |= AMDGPU_PTE_SNOOPED;
|
|
||||||
|
|
||||||
trace_amdgpu_vm_bo_update(mapping);
|
trace_amdgpu_vm_bo_update(mapping);
|
||||||
|
|
||||||
|
@ -440,12 +440,32 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
|
||||||
|
struct amdgpu_bo_va_mapping *mapping,
|
||||||
|
uint64_t *flags)
|
||||||
|
{
|
||||||
|
*flags &= ~AMDGPU_PTE_EXECUTABLE;
|
||||||
|
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
|
||||||
|
|
||||||
|
*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
|
||||||
|
*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
|
||||||
|
|
||||||
|
if (mapping->flags & AMDGPU_PTE_PRT) {
|
||||||
|
*flags |= AMDGPU_PTE_PRT;
|
||||||
|
*flags |= AMDGPU_PTE_SNOOPED;
|
||||||
|
*flags |= AMDGPU_PTE_LOG;
|
||||||
|
*flags |= AMDGPU_PTE_SYSTEM;
|
||||||
|
*flags &= ~AMDGPU_PTE_VALID;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
|
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
|
||||||
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
|
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
|
||||||
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
|
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
|
||||||
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
|
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
|
||||||
.map_mtype = gmc_v10_0_map_mtype,
|
.map_mtype = gmc_v10_0_map_mtype,
|
||||||
.get_vm_pde = gmc_v10_0_get_vm_pde
|
.get_vm_pde = gmc_v10_0_get_vm_pde,
|
||||||
|
.get_vm_pte = gmc_v10_0_get_vm_pte
|
||||||
};
|
};
|
||||||
|
|
||||||
static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
|
static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
|
||||||
|
@ -392,6 +392,14 @@ static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
|
|||||||
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
|
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
|
||||||
|
struct amdgpu_bo_va_mapping *mapping,
|
||||||
|
uint64_t *flags)
|
||||||
|
{
|
||||||
|
*flags &= ~AMDGPU_PTE_EXECUTABLE;
|
||||||
|
*flags &= ~AMDGPU_PTE_PRT;
|
||||||
|
}
|
||||||
|
|
||||||
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
|
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
|
||||||
bool value)
|
bool value)
|
||||||
{
|
{
|
||||||
@ -1138,6 +1146,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
|
|||||||
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
|
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
|
||||||
.set_prt = gmc_v6_0_set_prt,
|
.set_prt = gmc_v6_0_set_prt,
|
||||||
.get_vm_pde = gmc_v6_0_get_vm_pde,
|
.get_vm_pde = gmc_v6_0_get_vm_pde,
|
||||||
|
.get_vm_pte = gmc_v6_0_get_vm_pte,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
|
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
|
||||||
|
@ -469,6 +469,14 @@ static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
|
|||||||
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
|
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
|
||||||
|
struct amdgpu_bo_va_mapping *mapping,
|
||||||
|
uint64_t *flags)
|
||||||
|
{
|
||||||
|
*flags &= ~AMDGPU_PTE_EXECUTABLE;
|
||||||
|
*flags &= ~AMDGPU_PTE_PRT;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gmc_v8_0_set_fault_enable_default - update VM fault handling
|
* gmc_v8_0_set_fault_enable_default - update VM fault handling
|
||||||
*
|
*
|
||||||
@ -1328,7 +1336,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
|
|||||||
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
|
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
|
||||||
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
|
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
|
||||||
.set_prt = gmc_v7_0_set_prt,
|
.set_prt = gmc_v7_0_set_prt,
|
||||||
.get_vm_pde = gmc_v7_0_get_vm_pde
|
.get_vm_pde = gmc_v7_0_get_vm_pde,
|
||||||
|
.get_vm_pte = gmc_v7_0_get_vm_pte
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
|
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
|
||||||
|
@ -692,6 +692,15 @@ static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
|
|||||||
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
|
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
|
||||||
|
struct amdgpu_bo_va_mapping *mapping,
|
||||||
|
uint64_t *flags)
|
||||||
|
{
|
||||||
|
*flags &= ~AMDGPU_PTE_EXECUTABLE;
|
||||||
|
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
|
||||||
|
*flags &= ~AMDGPU_PTE_PRT;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gmc_v8_0_set_fault_enable_default - update VM fault handling
|
* gmc_v8_0_set_fault_enable_default - update VM fault handling
|
||||||
*
|
*
|
||||||
@ -1694,7 +1703,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
|
|||||||
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
|
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
|
||||||
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
|
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
|
||||||
.set_prt = gmc_v8_0_set_prt,
|
.set_prt = gmc_v8_0_set_prt,
|
||||||
.get_vm_pde = gmc_v8_0_get_vm_pde
|
.get_vm_pde = gmc_v8_0_get_vm_pde,
|
||||||
|
.get_vm_pte = gmc_v8_0_get_vm_pte
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
|
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
|
||||||
|
@ -653,12 +653,34 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
|
||||||
|
struct amdgpu_bo_va_mapping *mapping,
|
||||||
|
uint64_t *flags)
|
||||||
|
{
|
||||||
|
*flags &= ~AMDGPU_PTE_EXECUTABLE;
|
||||||
|
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
|
||||||
|
|
||||||
|
*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
|
||||||
|
*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
|
||||||
|
|
||||||
|
if (mapping->flags & AMDGPU_PTE_PRT) {
|
||||||
|
*flags |= AMDGPU_PTE_PRT;
|
||||||
|
*flags &= ~AMDGPU_PTE_VALID;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (adev->asic_type == CHIP_ARCTURUS &&
|
||||||
|
!(*flags & AMDGPU_PTE_SYSTEM) &&
|
||||||
|
mapping->bo_va->is_xgmi)
|
||||||
|
*flags |= AMDGPU_PTE_SNOOPED;
|
||||||
|
}
|
||||||
|
|
||||||
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
|
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
|
||||||
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
|
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
|
||||||
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
|
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
|
||||||
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
|
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
|
||||||
.map_mtype = gmc_v9_0_map_mtype,
|
.map_mtype = gmc_v9_0_map_mtype,
|
||||||
.get_vm_pde = gmc_v9_0_get_vm_pde
|
.get_vm_pde = gmc_v9_0_get_vm_pde,
|
||||||
|
.get_vm_pte = gmc_v9_0_get_vm_pte
|
||||||
};
|
};
|
||||||
|
|
||||||
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
|
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
|
||||||
|
Loading…
Reference in New Issue
Block a user