drm/amdgpu: remove more of the ring backup code
Not used anymore.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:

parent bb06d7ef99
commit 33b7ed0122
| @@ -776,8 +776,6 @@ struct amdgpu_ring { | ||||
| 	struct amdgpu_bo	*ring_obj; | ||||
| 	volatile uint32_t	*ring; | ||||
| 	unsigned		rptr_offs; | ||||
| 	u64			next_rptr_gpu_addr; | ||||
| 	volatile u32		*next_rptr_cpu_addr; | ||||
| 	unsigned		wptr; | ||||
| 	unsigned		wptr_old; | ||||
| 	unsigned		ring_size; | ||||
| @@ -796,7 +794,6 @@ struct amdgpu_ring { | ||||
| 	u32			doorbell_index; | ||||
| 	bool			use_doorbell; | ||||
| 	unsigned		wptr_offs; | ||||
| 	unsigned		next_rptr_offs; | ||||
| 	unsigned		fence_offs; | ||||
| 	uint64_t		current_ctx; | ||||
| 	enum amdgpu_ring_type	type; | ||||
|  | ||||
| @@ -190,14 +190,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | ||||
| 		return r; | ||||
| 	} | ||||
| 
 | ||||
| 	r = amdgpu_wb_get(adev, &ring->next_rptr_offs); | ||||
| 	if (r) { | ||||
| 		dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r); | ||||
| 		return r; | ||||
| 	} | ||||
| 	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4; | ||||
| 	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs]; | ||||
| 
 | ||||
| 	r = amdgpu_wb_get(adev, &ring->cond_exe_offs); | ||||
| 	if (r) { | ||||
| 		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r); | ||||
| @@ -280,7 +272,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) | ||||
| 	amdgpu_wb_free(ring->adev, ring->fence_offs); | ||||
| 	amdgpu_wb_free(ring->adev, ring->rptr_offs); | ||||
| 	amdgpu_wb_free(ring->adev, ring->wptr_offs); | ||||
| 	amdgpu_wb_free(ring->adev, ring->next_rptr_offs); | ||||
| 
 | ||||
| 	if (ring_obj) { | ||||
| 		r = amdgpu_bo_reserve(ring_obj, false); | ||||
|  | ||||
| @@ -224,17 +224,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, | ||||
| 				  unsigned vm_id, bool ctx_switch) | ||||
| { | ||||
| 	u32 extra_bits = vm_id & 0xf; | ||||
| 	u32 next_rptr = ring->wptr + 5; | ||||
| 
 | ||||
| 	while ((next_rptr & 7) != 4) | ||||
| 		next_rptr++; | ||||
| 
 | ||||
| 	next_rptr += 4; | ||||
| 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); | ||||
| 	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); | ||||
| 	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); | ||||
| 	amdgpu_ring_write(ring, 1); /* number of DWs to follow */ | ||||
| 	amdgpu_ring_write(ring, next_rptr); | ||||
| 
 | ||||
| 	/* IB packet must end on a 8 DW boundary */ | ||||
| 	cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8); | ||||
|  | ||||
| @@ -2056,17 +2056,6 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, | ||||
| 				      unsigned vm_id, bool ctx_switch) | ||||
| { | ||||
| 	u32 header, control = 0; | ||||
| 	u32 next_rptr = ring->wptr + 5; | ||||
| 
 | ||||
| 	if (ctx_switch) | ||||
| 		next_rptr += 2; | ||||
| 
 | ||||
| 	next_rptr += 4; | ||||
| 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||||
| 	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); | ||||
| 	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); | ||||
| 	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); | ||||
| 	amdgpu_ring_write(ring, next_rptr); | ||||
| 
 | ||||
| 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */ | ||||
| 	if (ctx_switch) { | ||||
| @@ -2095,22 +2084,9 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, | ||||
| 					  struct amdgpu_ib *ib, | ||||
| 					  unsigned vm_id, bool ctx_switch) | ||||
| { | ||||
| 	u32 header, control = 0; | ||||
| 	u32 next_rptr = ring->wptr + 5; | ||||
| 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); | ||||
| 
 | ||||
| 	control |= INDIRECT_BUFFER_VALID; | ||||
| 	next_rptr += 4; | ||||
| 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||||
| 	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); | ||||
| 	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); | ||||
| 	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); | ||||
| 	amdgpu_ring_write(ring, next_rptr); | ||||
| 
 | ||||
| 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); | ||||
| 
 | ||||
| 	control |= ib->length_dw | (vm_id << 24); | ||||
| 
 | ||||
| 	amdgpu_ring_write(ring, header); | ||||
| 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | ||||
| 	amdgpu_ring_write(ring, | ||||
| #ifdef __BIG_ENDIAN | ||||
| 					  (2 << 0) | | ||||
|  | ||||
| @@ -5929,17 +5929,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, | ||||
| 				      unsigned vm_id, bool ctx_switch) | ||||
| { | ||||
| 	u32 header, control = 0; | ||||
| 	u32 next_rptr = ring->wptr + 5; | ||||
| 
 | ||||
| 	if (ctx_switch) | ||||
| 		next_rptr += 2; | ||||
| 
 | ||||
| 	next_rptr += 4; | ||||
| 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||||
| 	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); | ||||
| 	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); | ||||
| 	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); | ||||
| 	amdgpu_ring_write(ring, next_rptr); | ||||
| 
 | ||||
| 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */ | ||||
| 	if (ctx_switch) { | ||||
| @@ -5968,23 +5957,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, | ||||
| 					  struct amdgpu_ib *ib, | ||||
| 					  unsigned vm_id, bool ctx_switch) | ||||
| { | ||||
| 	u32 header, control = 0; | ||||
| 	u32 next_rptr = ring->wptr + 5; | ||||
| 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); | ||||
| 
 | ||||
| 	control |= INDIRECT_BUFFER_VALID; | ||||
| 
 | ||||
| 	next_rptr += 4; | ||||
| 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||||
| 	amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); | ||||
| 	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); | ||||
| 	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); | ||||
| 	amdgpu_ring_write(ring, next_rptr); | ||||
| 
 | ||||
| 	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); | ||||
| 
 | ||||
| 	control |= ib->length_dw | (vm_id << 24); | ||||
| 
 | ||||
| 	amdgpu_ring_write(ring, header); | ||||
| 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | ||||
| 	amdgpu_ring_write(ring, | ||||
| #ifdef __BIG_ENDIAN | ||||
| 					  (2 << 0) | | ||||
|  | ||||
| @@ -255,19 +255,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, | ||||
| 				   unsigned vm_id, bool ctx_switch) | ||||
| { | ||||
| 	u32 vmid = vm_id & 0xf; | ||||
| 	u32 next_rptr = ring->wptr + 5; | ||||
| 
 | ||||
| 	while ((next_rptr & 7) != 2) | ||||
| 		next_rptr++; | ||||
| 
 | ||||
| 	next_rptr += 6; | ||||
| 
 | ||||
| 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | ||||
| 			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); | ||||
| 	amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); | ||||
| 	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); | ||||
| 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); | ||||
| 	amdgpu_ring_write(ring, next_rptr); | ||||
| 
 | ||||
| 	/* IB packet must end on a 8 DW boundary */ | ||||
| 	sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); | ||||
|  | ||||
| @@ -415,18 +415,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, | ||||
| 				   unsigned vm_id, bool ctx_switch) | ||||
| { | ||||
| 	u32 vmid = vm_id & 0xf; | ||||
| 	u32 next_rptr = ring->wptr + 5; | ||||
| 
 | ||||
| 	while ((next_rptr & 7) != 2) | ||||
| 		next_rptr++; | ||||
| 	next_rptr += 6; | ||||
| 
 | ||||
| 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | ||||
| 			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); | ||||
| 	amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); | ||||
| 	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); | ||||
| 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); | ||||
| 	amdgpu_ring_write(ring, next_rptr); | ||||
| 
 | ||||
| 	/* IB packet must end on a 8 DW boundary */ | ||||
| 	sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user