drm/radeon: rework the vm_flush interface
Pass the vm and ring index rather than an IB. This allows us to use the
vm_flush interface for non-IB cases in the future.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
commit 498522b455
parent 76c44f2c0d
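For context, a minimal standalone sketch of why passing the ring index and VM directly matters: a caller that has no IB in hand can still request a TLB flush through the same per-ring hook. This is a toy model, not the kernel sources; the types and the names vm_flush_t and model_vm_flush are illustrative only.

#include <stdio.h>

struct radeon_device;                          /* opaque in this sketch */
struct radeon_vm { int id; };
struct radeon_ib { int ring; struct radeon_vm *vm; };

/* new-style hook: ring index and VM are passed directly, no IB required */
typedef void (*vm_flush_t)(struct radeon_device *rdev, int ridx,
                           struct radeon_vm *vm);

static void model_vm_flush(struct radeon_device *rdev, int ridx,
                           struct radeon_vm *vm)
{
        (void)rdev;
        if (vm == NULL)
                return;
        printf("flush VM context %d on ring %d\n", vm->id, ridx);
}

int main(void)
{
        vm_flush_t flush = model_vm_flush;
        struct radeon_vm vm = { 1 };
        struct radeon_ib ib = { 0, &vm };

        /* IB path: the caller unpacks the ring and vm from the IB */
        flush(NULL, ib.ring, ib.vm);

        /* non-IB path: a flush can now be requested without any IB */
        flush(NULL, 3, &vm);
        return 0;
}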
drivers/gpu/drm/radeon/ni.c

@@ -1565,10 +1565,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
  * Update the page table base and flush the VM TLB
  * using the CP (cayman-si).
  */
-void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 {
-	struct radeon_ring *ring = &rdev->ring[ib->ring];
-	struct radeon_vm *vm = ib->vm;
+	struct radeon_ring *ring = &rdev->ring[ridx];
 
 	if (vm == NULL)
 		return;
@@ -1588,5 +1587,5 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	/* bits 0-7 are the VM contexts0-7 */
 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
-	radeon_ring_write(ring, 1 << ib->vm->id);
+	radeon_ring_write(ring, 1 << vm->id);
 }
drivers/gpu/drm/radeon/radeon.h

@@ -1165,7 +1165,7 @@ struct radeon_asic {
 		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
 		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
 		bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
-		void (*vm_flush)(struct radeon_device *rdev, struct radeon_ib *ib);
+		void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 	} ring[RADEON_NUM_RINGS];
 	/* irqs */
 	struct {
@@ -1772,7 +1772,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
 #define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
 #define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
-#define radeon_ring_vm_flush(rdev, r, ib) (rdev)->asic->ring[(r)].vm_flush((rdev), (ib))
+#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
 #define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
 #define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
drivers/gpu/drm/radeon/radeon_asic.h

@@ -443,7 +443,7 @@ int cayman_asic_reset(struct radeon_device *rdev);
 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int cayman_vm_init(struct radeon_device *rdev);
 void cayman_vm_fini(struct radeon_device *rdev);
-void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
 void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 			uint64_t addr, unsigned count,
@@ -470,7 +470,7 @@ int si_irq_set(struct radeon_device *rdev);
 int si_irq_process(struct radeon_device *rdev);
 int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
-void si_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib);
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 uint64_t si_get_gpu_clock(struct radeon_device *rdev);
drivers/gpu/drm/radeon/radeon_ring.c

@@ -162,7 +162,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 	}
 	/* if we can't remember our last VM flush then flush now! */
 	if (ib->vm && !ib->vm->last_flush) {
-		radeon_ring_vm_flush(rdev, ib->ring, ib);
+		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
 	}
 	if (const_ib) {
 		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
drivers/gpu/drm/radeon/si.c

@@ -2789,10 +2789,9 @@ void si_vm_fini(struct radeon_device *rdev)
 {
 }
 
-void si_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 {
-	struct radeon_ring *ring = &rdev->ring[ib->ring];
-	struct radeon_vm *vm = ib->vm;
+	struct radeon_ring *ring = &rdev->ring[ridx];
 
 	if (vm == NULL)
 		return;
@@ -2826,7 +2825,7 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)
 		 WRITE_DATA_DST_SEL(0)));
 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
 	radeon_ring_write(ring, 0);
-	radeon_ring_write(ring, 1 << ib->vm->id);
+	radeon_ring_write(ring, 1 << vm->id);
 }
 
 /*