drm/amdgpu: move JPEG2.5 out from VCN2.5

And clean up the duplicated stuff

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 14f43e8f88 (parent 5be45a26c9)
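Note: with JPEG 2.5 split out of VCN 2.5, the new jpeg_v2_5_ip_block has to be registered with the driver's IP-block framework on its own, since the VCN 2.5 block no longer carries a JPEG ring. That registration is not part of this diff; the sketch below is only an illustration of how such a hookup typically looks. The call site (an Arcturus branch of the SoC init code) and the helper name example_add_arcturus_media_blocks are assumptions, not taken from this commit.

/* Illustrative sketch only -- not part of this commit. */
#include "amdgpu.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"

static int example_add_arcturus_media_blocks(struct amdgpu_device *adev)
{
        int r;

        /* VCN 2.5 now provides only the decode/encode rings ... */
        r = amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
        if (r)
                return r;

        /* ... and JPEG 2.5 is added as its own AMD_IP_BLOCK_TYPE_JPEG block. */
        return amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
}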
@@ -155,7 +155,8 @@ amdgpu-y += \
        vcn_v2_5.o \
        amdgpu_jpeg.o \
        jpeg_v1_0.o \
        jpeg_v2_0.o
        jpeg_v2_0.o \
        jpeg_v2_5.o

# add ATHUB block
amdgpu-y += \
@@ -26,6 +26,9 @@

#define AMDGPU_MAX_JPEG_INSTANCES 2

#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)

struct amdgpu_jpeg_reg{
        unsigned jpeg_pitch;
};
@@ -706,108 +706,3 @@ error:
        amdgpu_bo_unref(&bo);
        return r;
}

int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(adev->vcn.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
                struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        const unsigned ib_size_dw = 16;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
        ib->ptr[1] = 0xDEADBEEF;
        for (i = 2; i < 16; i += 2) {
                ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        struct dma_fence *fence = NULL;
        long r = 0;

        r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                r = -ETIMEDOUT;
                goto error;
        } else if (r < 0) {
                goto error;
        } else {
                r = 0;
        }

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        dma_fence_put(fence);
error:
        return r;
}
@@ -158,7 +158,6 @@ struct amdgpu_vcn_reg{
        unsigned        ib_size;
        unsigned        gp_scratch8;
        unsigned        scratch9;
        unsigned        jpeg_pitch;
};

struct amdgpu_vcn_inst {
@@ -168,7 +167,6 @@ struct amdgpu_vcn_inst {
        void            *saved_bo;
        struct amdgpu_ring      ring_dec;
        struct amdgpu_ring      ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
        struct amdgpu_ring      ring_jpeg;
        struct amdgpu_irq_src   irq;
        struct amdgpu_vcn_reg   external;
};
@@ -209,7 +207,4 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);

int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout);

#endif
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c (new file, 641 lines)
@@ -0,0 +1,641 @@
|
||||
/*
|
||||
* Copyright 2019 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_jpeg.h"
|
||||
#include "soc15.h"
|
||||
#include "soc15d.h"
|
||||
#include "jpeg_v2_0.h"
|
||||
|
||||
#include "vcn/vcn_2_5_offset.h"
|
||||
#include "vcn/vcn_2_5_sh_mask.h"
|
||||
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
|
||||
|
||||
#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
|
||||
|
||||
#define JPEG25_MAX_HW_INSTANCES_ARCTURUS 2
|
||||
|
||||
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
|
||||
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
|
||||
static int jpeg_v2_5_set_powergating_state(void *handle,
|
||||
enum amd_powergating_state state);
|
||||
|
||||
static int amdgpu_ih_clientid_jpeg[] = {
|
||||
SOC15_IH_CLIENTID_VCN,
|
||||
SOC15_IH_CLIENTID_VCN1
|
||||
};
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_early_init - set function pointers
|
||||
*
|
||||
* @handle: amdgpu_device pointer
|
||||
*
|
||||
* Set ring and irq function pointers
|
||||
*/
|
||||
static int jpeg_v2_5_early_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
if (adev->asic_type == CHIP_ARCTURUS) {
|
||||
u32 harvest;
|
||||
int i;
|
||||
|
||||
adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
|
||||
harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
|
||||
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
|
||||
adev->jpeg.harvest_config |= 1 << i;
|
||||
}
|
||||
|
||||
if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
|
||||
AMDGPU_JPEG_HARVEST_JPEG1))
|
||||
return -ENOENT;
|
||||
} else
|
||||
adev->jpeg.num_jpeg_inst = 1;
|
||||
|
||||
jpeg_v2_5_set_dec_ring_funcs(adev);
|
||||
jpeg_v2_5_set_irq_funcs(adev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_sw_init - sw init for JPEG block
|
||||
*
|
||||
* @handle: amdgpu_device pointer
|
||||
*
|
||||
* Load firmware and sw initialization
|
||||
*/
|
||||
static int jpeg_v2_5_sw_init(void *handle)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
int i, r;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
/* JPEG TRAP */
|
||||
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
|
||||
VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_jpeg_sw_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_jpeg_resume(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
ring = &adev->jpeg.inst[i].ring_dec;
|
||||
ring->use_doorbell = true;
|
||||
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
|
||||
sprintf(ring->name, "jpeg_dec_%d", i);
|
||||
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
|
||||
adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_sw_fini - sw fini for JPEG block
|
||||
*
|
||||
* @handle: amdgpu_device pointer
|
||||
*
|
||||
* JPEG suspend and free up sw allocation
|
||||
*/
|
||||
static int jpeg_v2_5_sw_fini(void *handle)
|
||||
{
|
||||
int r;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
r = amdgpu_jpeg_suspend(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_jpeg_sw_fini(adev);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_hw_init - start and test JPEG block
|
||||
*
|
||||
* @handle: amdgpu_device pointer
|
||||
*
|
||||
*/
|
||||
static int jpeg_v2_5_hw_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct amdgpu_ring *ring;
|
||||
int i, r;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
ring = &adev->jpeg.inst[i].ring_dec;
|
||||
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
|
||||
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
|
||||
|
||||
r = amdgpu_ring_test_helper(ring);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
DRM_INFO("JPEG decode initialized successfully.\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_hw_fini - stop the hardware block
|
||||
*
|
||||
* @handle: amdgpu_device pointer
|
||||
*
|
||||
* Stop the JPEG block, mark ring as not ready any more
|
||||
*/
|
||||
static int jpeg_v2_5_hw_fini(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct amdgpu_ring *ring;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
ring = &adev->jpeg.inst[i].ring_dec;
|
||||
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
|
||||
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
|
||||
jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
|
||||
|
||||
ring->sched.ready = false;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_suspend - suspend JPEG block
|
||||
*
|
||||
* @handle: amdgpu_device pointer
|
||||
*
|
||||
* HW fini and suspend JPEG block
|
||||
*/
|
||||
static int jpeg_v2_5_suspend(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int r;
|
||||
|
||||
r = jpeg_v2_5_hw_fini(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_jpeg_suspend(adev);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_resume - resume JPEG block
|
||||
*
|
||||
* @handle: amdgpu_device pointer
|
||||
*
|
||||
* Resume firmware and hw init JPEG block
|
||||
*/
|
||||
static int jpeg_v2_5_resume(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int r;
|
||||
|
||||
r = amdgpu_jpeg_resume(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = jpeg_v2_5_hw_init(adev);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device* adev, int inst)
|
||||
{
|
||||
uint32_t data;
|
||||
|
||||
data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
|
||||
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
|
||||
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
|
||||
else
|
||||
data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
|
||||
|
||||
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
|
||||
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
|
||||
WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
|
||||
|
||||
data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
|
||||
data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
|
||||
| JPEG_CGC_GATE__JPEG2_DEC_MASK
|
||||
| JPEG_CGC_GATE__JPEG_ENC_MASK
|
||||
| JPEG_CGC_GATE__JMCIF_MASK
|
||||
| JPEG_CGC_GATE__JRBBM_MASK);
|
||||
WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
|
||||
|
||||
data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
|
||||
data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
|
||||
| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
|
||||
| JPEG_CGC_CTRL__JMCIF_MODE_MASK
|
||||
| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
|
||||
WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
|
||||
}
|
||||
|
||||
static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device* adev, int inst)
|
||||
{
|
||||
uint32_t data;
|
||||
|
||||
data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
|
||||
data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
|
||||
|JPEG_CGC_GATE__JPEG2_DEC_MASK
|
||||
|JPEG_CGC_GATE__JPEG_ENC_MASK
|
||||
|JPEG_CGC_GATE__JMCIF_MASK
|
||||
|JPEG_CGC_GATE__JRBBM_MASK);
|
||||
WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_start - start JPEG block
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Setup and start the JPEG block
|
||||
*/
|
||||
static int jpeg_v2_5_start(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
ring = &adev->jpeg.inst[i].ring_dec;
|
||||
/* disable anti hang mechanism */
|
||||
WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
|
||||
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
|
||||
|
||||
/* JPEG disable CGC */
|
||||
jpeg_v2_5_disable_clock_gating(adev, i);
|
||||
|
||||
/* MJPEG global tiling registers */
|
||||
WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
|
||||
adev->gfx.config.gb_addr_config);
|
||||
WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
|
||||
adev->gfx.config.gb_addr_config);
|
||||
|
||||
/* enable JMI channel */
|
||||
WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
|
||||
~UVD_JMI_CNTL__SOFT_RESET_MASK);
|
||||
|
||||
/* enable System Interrupt for JRBC */
|
||||
WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
|
||||
JPEG_SYS_INT_EN__DJRBC_MASK,
|
||||
~JPEG_SYS_INT_EN__DJRBC_MASK);
|
||||
|
||||
WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
|
||||
WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
|
||||
WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
|
||||
lower_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
|
||||
upper_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
|
||||
WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
|
||||
WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
|
||||
WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
|
||||
ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_stop - stop JPEG block
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* stop the JPEG block
|
||||
*/
|
||||
static int jpeg_v2_5_stop(struct amdgpu_device *adev)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
/* reset JMI */
|
||||
WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
|
||||
UVD_JMI_CNTL__SOFT_RESET_MASK,
|
||||
~UVD_JMI_CNTL__SOFT_RESET_MASK);
|
||||
|
||||
jpeg_v2_5_enable_clock_gating(adev, i);
|
||||
|
||||
/* enable anti hang mechanism */
|
||||
WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
|
||||
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
|
||||
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_dec_ring_get_rptr - get read pointer
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*
|
||||
* Returns the current hardware read pointer
|
||||
*/
|
||||
static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
|
||||
return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR);
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_dec_ring_get_wptr - get write pointer
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*
|
||||
* Returns the current hardware write pointer
|
||||
*/
|
||||
static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
|
||||
if (ring->use_doorbell)
|
||||
return adev->wb.wb[ring->wptr_offs];
|
||||
else
|
||||
return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR);
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_dec_ring_set_wptr - set write pointer
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*
|
||||
* Commits the write pointer to the hardware
|
||||
*/
|
||||
static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
|
||||
if (ring->use_doorbell) {
|
||||
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
|
||||
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
|
||||
} else {
|
||||
WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
|
||||
}
|
||||
}
|
||||
|
||||
static bool jpeg_v2_5_is_idle(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int i, ret = 1;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) &
|
||||
UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
|
||||
UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int jpeg_v2_5_wait_for_idle(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int i, ret = 0;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS,
|
||||
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
|
||||
UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int jpeg_v2_5_set_clockgating_state(void *handle,
|
||||
enum amd_clockgating_state state)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
if (enable) {
|
||||
if (jpeg_v2_5_is_idle(handle))
|
||||
return -EBUSY;
|
||||
jpeg_v2_5_enable_clock_gating(adev, i);
|
||||
} else {
|
||||
jpeg_v2_5_disable_clock_gating(adev, i);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int jpeg_v2_5_set_powergating_state(void *handle,
|
||||
enum amd_powergating_state state)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int ret;
|
||||
|
||||
if(state == adev->jpeg.cur_state)
|
||||
return 0;
|
||||
|
||||
if (state == AMD_PG_STATE_GATE)
|
||||
ret = jpeg_v2_5_stop(adev);
|
||||
else
|
||||
ret = jpeg_v2_5_start(adev);
|
||||
|
||||
if(!ret)
|
||||
adev->jpeg.cur_state = state;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
unsigned type,
|
||||
enum amdgpu_interrupt_state state)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
uint32_t ip_instance;
|
||||
|
||||
switch (entry->client_id) {
|
||||
case SOC15_IH_CLIENTID_VCN:
|
||||
ip_instance = 0;
|
||||
break;
|
||||
case SOC15_IH_CLIENTID_VCN1:
|
||||
ip_instance = 1;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
DRM_DEBUG("IH: JPEG TRAP\n");
|
||||
|
||||
switch (entry->src_id) {
|
||||
case VCN_2_0__SRCID__JPEG_DECODE:
|
||||
amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Unhandled interrupt: %d %d\n",
|
||||
entry->src_id, entry->src_data[0]);
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
|
||||
.name = "jpeg_v2_5",
|
||||
.early_init = jpeg_v2_5_early_init,
|
||||
.late_init = NULL,
|
||||
.sw_init = jpeg_v2_5_sw_init,
|
||||
.sw_fini = jpeg_v2_5_sw_fini,
|
||||
.hw_init = jpeg_v2_5_hw_init,
|
||||
.hw_fini = jpeg_v2_5_hw_fini,
|
||||
.suspend = jpeg_v2_5_suspend,
|
||||
.resume = jpeg_v2_5_resume,
|
||||
.is_idle = jpeg_v2_5_is_idle,
|
||||
.wait_for_idle = jpeg_v2_5_wait_for_idle,
|
||||
.check_soft_reset = NULL,
|
||||
.pre_soft_reset = NULL,
|
||||
.soft_reset = NULL,
|
||||
.post_soft_reset = NULL,
|
||||
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
|
||||
.set_powergating_state = jpeg_v2_5_set_powergating_state,
|
||||
};
|
||||
|
||||
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
|
||||
.type = AMDGPU_RING_TYPE_VCN_JPEG,
|
||||
.align_mask = 0xf,
|
||||
.vmhub = AMDGPU_MMHUB_1,
|
||||
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
|
||||
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
|
||||
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
|
||||
.emit_frame_size =
|
||||
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
|
||||
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
|
||||
8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
|
||||
18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */
|
||||
8 + 16,
|
||||
.emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */
|
||||
.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
|
||||
.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
|
||||
.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
|
||||
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
|
||||
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
|
||||
.insert_nop = jpeg_v2_0_dec_ring_nop,
|
||||
.insert_start = jpeg_v2_0_dec_ring_insert_start,
|
||||
.insert_end = jpeg_v2_0_dec_ring_insert_end,
|
||||
.pad_ib = amdgpu_ring_generic_pad_ib,
|
||||
.begin_use = amdgpu_jpeg_ring_begin_use,
|
||||
.end_use = amdgpu_jpeg_ring_end_use,
|
||||
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
|
||||
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
};
|
||||
|
||||
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
|
||||
adev->jpeg.inst[i].ring_dec.me = i;
|
||||
DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
|
||||
}
|
||||
}
|
||||
|
||||
static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
|
||||
.set = jpeg_v2_5_set_interrupt_state,
|
||||
.process = jpeg_v2_5_process_interrupt,
|
||||
};
|
||||
|
||||
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
continue;
|
||||
|
||||
adev->jpeg.inst[i].irq.num_types = 1;
|
||||
adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
|
||||
}
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
|
||||
{
|
||||
.type = AMD_IP_BLOCK_TYPE_JPEG,
|
||||
.major = 2,
|
||||
.minor = 5,
|
||||
.rev = 0,
|
||||
.funcs = &jpeg_v2_5_ip_funcs,
|
||||
};
|
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h (new file, 29 lines)
@@ -0,0 +1,29 @@
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __JPEG_V2_5_H__
#define __JPEG_V2_5_H__

extern const struct amdgpu_ip_block_version jpeg_v2_5_ip_block;

#endif /* __JPEG_V2_5_H__ */
@@ -47,26 +47,6 @@
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x1e2

#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff
#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029
#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb
#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8
#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9
#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed
#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085
#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f

#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000

#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b
#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
#define mmUVD_REG_XX_MASK 0x026c
@@ -1616,222 +1596,6 @@ void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_
|
||||
amdgpu_ring_write(ring, val);
|
||||
}
|
||||
|
||||
/**
|
||||
* vcn_v2_0_jpeg_ring_insert_start - insert a start command
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*
|
||||
* Write a start command to the ring.
|
||||
*/
|
||||
void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
|
||||
{
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, 0x68e04);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, 0x80010000);
|
||||
}
|
||||
|
||||
/**
|
||||
* vcn_v2_0_jpeg_ring_insert_end - insert a end command
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*
|
||||
* Write a end command to the ring.
|
||||
*/
|
||||
void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
|
||||
{
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, 0x68e04);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, 0x00010000);
|
||||
}
|
||||
|
||||
/**
|
||||
* vcn_v2_0_jpeg_ring_emit_fence - emit an fence & trap command
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
* @fence: fence to emit
|
||||
*
|
||||
* Write a fence and a trap command to the ring.
|
||||
*/
|
||||
void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
|
||||
unsigned flags)
|
||||
{
|
||||
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, seq);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, seq);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, lower_32_bits(addr));
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, upper_32_bits(addr));
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, 0x8);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
|
||||
0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
|
||||
amdgpu_ring_write(ring, 0);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, 0x3fbc);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, 0x1);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
|
||||
amdgpu_ring_write(ring, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* vcn_v2_0_jpeg_ring_emit_ib - execute indirect buffer
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
* @ib: indirect buffer to execute
|
||||
*
|
||||
* Write ring commands to execute the indirect buffer.
|
||||
*/
|
||||
void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
|
||||
struct amdgpu_job *job,
|
||||
struct amdgpu_ib *ib,
|
||||
uint32_t flags)
|
||||
{
|
||||
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, ib->length_dw);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
|
||||
amdgpu_ring_write(ring, 0);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, 0x01400200);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, 0x2);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
|
||||
0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
|
||||
amdgpu_ring_write(ring, 0x2);
|
||||
}
|
||||
|
||||
void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
|
||||
uint32_t val, uint32_t mask)
|
||||
{
|
||||
uint32_t reg_offset = (reg << 2);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, 0x01400200);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
amdgpu_ring_write(ring, val);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
|
||||
amdgpu_ring_write(ring, 0);
|
||||
amdgpu_ring_write(ring,
|
||||
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
|
||||
} else {
|
||||
amdgpu_ring_write(ring, reg_offset);
|
||||
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
|
||||
0, 0, PACKETJ_TYPE3));
|
||||
}
|
||||
amdgpu_ring_write(ring, mask);
|
||||
}
|
||||
|
||||
void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
|
||||
unsigned vmid, uint64_t pd_addr)
|
||||
{
|
||||
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
|
||||
uint32_t data0, data1, mask;
|
||||
|
||||
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
|
||||
|
||||
/* wait for register write */
|
||||
data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
|
||||
data1 = lower_32_bits(pd_addr);
|
||||
mask = 0xffffffff;
|
||||
vcn_v2_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
|
||||
}
|
||||
|
||||
void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
|
||||
{
|
||||
uint32_t reg_offset = (reg << 2);
|
||||
|
||||
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
|
||||
amdgpu_ring_write(ring, 0);
|
||||
amdgpu_ring_write(ring,
|
||||
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
|
||||
} else {
|
||||
amdgpu_ring_write(ring, reg_offset);
|
||||
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
|
||||
0, 0, PACKETJ_TYPE0));
|
||||
}
|
||||
amdgpu_ring_write(ring, val);
|
||||
}
|
||||
|
||||
void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
|
||||
{
|
||||
int i;
|
||||
|
||||
WARN_ON(ring->wptr % 2 || count % 2);
|
||||
|
||||
for (i = 0; i < count / 2; i++) {
|
||||
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
|
||||
amdgpu_ring_write(ring, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
unsigned type,
|
||||
|
@@ -49,19 +49,6 @@ extern void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                unsigned int vmid, uint64_t pd_addr);
extern void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);

extern void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring);
extern void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring);
extern void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                unsigned flags);
extern void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
                                struct amdgpu_ib *ib, uint32_t flags);
extern void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
                                uint32_t val, uint32_t mask);
extern void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                unsigned vmid, uint64_t pd_addr);
extern void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
extern void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count);

extern const struct amdgpu_ip_block_version vcn_v2_0_ip_block;

#endif /* __VCN_V2_0_H__ */
@@ -47,13 +47,10 @@
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c

#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f

#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
        enum amd_powergating_state state);
@@ -95,7 +92,6 @@ static int vcn_v2_5_early_init(void *handle)

        vcn_v2_5_set_dec_ring_funcs(adev);
        vcn_v2_5_set_enc_ring_funcs(adev);
        vcn_v2_5_set_jpeg_ring_funcs(adev);
        vcn_v2_5_set_irq_funcs(adev);

        return 0;
@@ -130,12 +126,6 @@ static int vcn_v2_5_sw_init(void *handle)
                        if (r)
                                return r;
                }

                /* VCN JPEG TRAP */
                r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
                        VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq);
                if (r)
                        return r;
        }

        r = amdgpu_vcn_sw_init(adev);
@@ -184,9 +174,6 @@ static int vcn_v2_5_sw_init(void *handle)
                adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
                adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);

                adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
                adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH);

                ring = &adev->vcn.inst[j].ring_dec;
                ring->use_doorbell = true;
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
@@ -204,14 +191,6 @@ static int vcn_v2_5_sw_init(void *handle)
                        if (r)
                                return r;
                }

                ring = &adev->vcn.inst[j].ring_jpeg;
                ring->use_doorbell = true;
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j;
                sprintf(ring->name, "vcn_jpeg_%d", j);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
                if (r)
                        return r;
        }

        return 0;
@@ -269,12 +248,8 @@ static int vcn_v2_5_hw_init(void *handle)
                        if (r)
                                goto done;
                }

                ring = &adev->vcn.inst[j].ring_jpeg;
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        goto done;
        }

done:
        if (!r)
                DRM_INFO("VCN decode and encode initialized successfully.\n");
@@ -309,9 +284,6 @@ static int vcn_v2_5_hw_fini(void *handle)
                ring = &adev->vcn.inst[i].ring_enc[i];
                ring->sched.ready = false;
        }

        ring = &adev->vcn.inst[i].ring_jpeg;
        ring->sched.ready = false;
        }

        return 0;
@@ -592,115 +564,6 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_start - start JPEG block
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Setup and start the JPEG block
|
||||
*/
|
||||
static int jpeg_v2_5_start(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
uint32_t tmp;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
|
||||
if (adev->vcn.harvest_config & (1 << i))
|
||||
continue;
|
||||
ring = &adev->vcn.inst[i].ring_jpeg;
|
||||
/* disable anti hang mechanism */
|
||||
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0,
|
||||
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
|
||||
|
||||
/* JPEG disable CGC */
|
||||
tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
|
||||
tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
|
||||
tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
|
||||
tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
|
||||
WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);
|
||||
|
||||
tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
|
||||
tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
|
||||
| JPEG_CGC_GATE__JPEG2_DEC_MASK
|
||||
| JPEG_CGC_GATE__JMCIF_MASK
|
||||
| JPEG_CGC_GATE__JRBBM_MASK);
|
||||
WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);
|
||||
|
||||
tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
|
||||
tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
|
||||
| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
|
||||
| JPEG_CGC_CTRL__JMCIF_MODE_MASK
|
||||
| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
|
||||
WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);
|
||||
|
||||
/* MJPEG global tiling registers */
|
||||
WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
|
||||
adev->gfx.config.gb_addr_config);
|
||||
WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
|
||||
adev->gfx.config.gb_addr_config);
|
||||
|
||||
/* enable JMI channel */
|
||||
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0,
|
||||
~UVD_JMI_CNTL__SOFT_RESET_MASK);
|
||||
|
||||
/* enable System Interrupt for JRBC */
|
||||
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN),
|
||||
JPEG_SYS_INT_EN__DJRBC_MASK,
|
||||
~JPEG_SYS_INT_EN__DJRBC_MASK);
|
||||
|
||||
WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0);
|
||||
WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
|
||||
WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
|
||||
lower_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
|
||||
upper_32_bits(ring->gpu_addr));
|
||||
WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0);
|
||||
WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0);
|
||||
WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
|
||||
WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
|
||||
ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* jpeg_v2_5_stop - stop JPEG block
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* stop the JPEG block
|
||||
*/
|
||||
static int jpeg_v2_5_stop(struct amdgpu_device *adev)
|
||||
{
|
||||
uint32_t tmp;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
|
||||
if (adev->vcn.harvest_config & (1 << i))
|
||||
continue;
|
||||
/* reset JMI */
|
||||
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL),
|
||||
UVD_JMI_CNTL__SOFT_RESET_MASK,
|
||||
~UVD_JMI_CNTL__SOFT_RESET_MASK);
|
||||
|
||||
tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
|
||||
tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
|
||||
|JPEG_CGC_GATE__JPEG2_DEC_MASK
|
||||
|JPEG_CGC_GATE__JMCIF_MASK
|
||||
|JPEG_CGC_GATE__JRBBM_MASK);
|
||||
WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);
|
||||
|
||||
/* enable anti hang mechanism */
|
||||
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS),
|
||||
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
|
||||
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vcn_v2_5_start(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
@@ -874,19 +737,14 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
                WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
        }
        r = jpeg_v2_5_start(adev);

        return r;
        return 0;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
        uint32_t tmp;
        int i, r;

        r = jpeg_v2_5_stop(adev);
        if (r)
                return r;
        int i, r = 0;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
@@ -1125,86 +983,6 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
};
|
||||
|
||||
/**
|
||||
* vcn_v2_5_jpeg_ring_get_rptr - get read pointer
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*
|
||||
* Returns the current hardware read pointer
|
||||
*/
|
||||
static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
|
||||
return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR);
|
||||
}
|
||||
|
||||
/**
|
||||
* vcn_v2_5_jpeg_ring_get_wptr - get write pointer
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*
|
||||
* Returns the current hardware write pointer
|
||||
*/
|
||||
static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
|
||||
if (ring->use_doorbell)
|
||||
return adev->wb.wb[ring->wptr_offs];
|
||||
else
|
||||
return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR);
|
||||
}
|
||||
|
||||
/**
|
||||
* vcn_v2_5_jpeg_ring_set_wptr - set write pointer
|
||||
*
|
||||
* @ring: amdgpu_ring pointer
|
||||
*
|
||||
* Commits the write pointer to the hardware
|
||||
*/
|
||||
static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
|
||||
if (ring->use_doorbell) {
|
||||
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
|
||||
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
|
||||
} else {
|
||||
WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
|
||||
}
|
||||
}
|
||||
|
||||
static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = {
|
||||
.type = AMDGPU_RING_TYPE_VCN_JPEG,
|
||||
.align_mask = 0xf,
|
||||
.vmhub = AMDGPU_MMHUB_1,
|
||||
.get_rptr = vcn_v2_5_jpeg_ring_get_rptr,
|
||||
.get_wptr = vcn_v2_5_jpeg_ring_get_wptr,
|
||||
.set_wptr = vcn_v2_5_jpeg_ring_set_wptr,
|
||||
.emit_frame_size =
|
||||
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
|
||||
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
|
||||
8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
|
||||
18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
|
||||
8 + 16,
|
||||
.emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
|
||||
.emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
|
||||
.emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
|
||||
.emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
|
||||
.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
|
||||
.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
|
||||
.insert_nop = vcn_v2_0_jpeg_ring_nop,
|
||||
.insert_start = vcn_v2_0_jpeg_ring_insert_start,
|
||||
.insert_end = vcn_v2_0_jpeg_ring_insert_end,
|
||||
.pad_ib = amdgpu_ring_generic_pad_ib,
|
||||
.begin_use = amdgpu_vcn_ring_begin_use,
|
||||
.end_use = amdgpu_vcn_ring_end_use,
|
||||
.emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
|
||||
.emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
|
||||
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
|
||||
};
|
||||
|
||||
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
int i;
|
||||
@@ -1233,19 +1011,6 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
        }
}

static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
                adev->vcn.inst[i].ring_jpeg.me = i;
                DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i);
        }
}

static bool vcn_v2_5_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1352,9 +1117,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
        case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
                amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
                break;
        case VCN_2_0__SRCID__JPEG_DECODE:
                amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);