mirror of https://github.com/torvalds/linux.git
synced 2024-11-24 13:11:40 +00:00
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "This is mostly amdgpu/radeon fixes, and imx related fixes. There is
  also one TTM fix, one nouveau fix, and one hdlcd fix.

  The AMD ones are some fixes for power management after suspend/resume
  on some GPUs, and some vblank fixes.

  The IMX ones are for stricter plane checks and some cleanups.

  I'm off until Monday, so there might be some fixes early next week if
  anyone missed me"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (34 commits)
  drm/nouveau/tegra: acquire and enable reference clock if needed
  drm/amdgpu: total vram size also reduces pin size
  drm/amd/powerplay: add uvd/vce dpm enabling flag default.
  drm/amd/powerplay: fix issue that resume back, dpm can't work on FIJI.
  drm/amdgpu: save and restore the firmware cache part when suspend resume
  drm/amdgpu: save and restore UVD context with suspend and resume
  drm/ttm: use phys_addr_t for ttm_bus_placement
  drm: ARM HDLCD - fix an error code
  drm: ARM HDLCD - get rid of devm_clk_put()
  drm/radeon: Only call drm_vblank_on/off between drm_vblank_init/cleanup
  drm/amdgpu: fence wait old rcu slot
  drm/amdgpu: fix leaking fence in the pageflip code
  drm/amdgpu: print vram type rather than just DDR
  drm/amdgpu/gmc: use proper register for vram type on Fiji
  drm/amdgpu/gmc: move vram type fetching into sw_init
  drm/amdgpu: Set vblank_disable_allowed = true
  drm/radeon: Set vblank_disable_allowed = true
  drm/amd/powerplay: Need to change boot to performance state in resume.
  drm/amd/powerplay: add new Fiji function for not setting same ps.
  drm/amdgpu: check dpm state before pm system fs initialized.
  ...
This commit is contained in:
commit 741f37b8cc
@@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
     struct amdgpu_bo    *vcpu_bo;
     void                *cpu_addr;
     uint64_t            gpu_addr;
+    void                *saved_bo;
     atomic_t            handles[AMDGPU_MAX_UVD_HANDLES];
     struct drm_file     *filp[AMDGPU_MAX_UVD_HANDLES];
     struct delayed_work idle_work;
@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
     struct drm_device *ddev = adev->ddev;
     struct drm_crtc *crtc;
     uint32_t line_time_us, vblank_lines;
+    struct cgs_mode_info *mode_info;
 
     if (info == NULL)
         return -EINVAL;
 
+    mode_info = info->mode_info;
+
     if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
         list_for_each_entry(crtc,
                 &ddev->mode_config.crtc_list, head) {
@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
             info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
             info->display_count++;
         }
-        if (info->mode_info != NULL &&
+        if (mode_info != NULL &&
             crtc->enabled && amdgpu_crtc->enabled &&
             amdgpu_crtc->hw_mode.clock) {
             line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
             vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
                 amdgpu_crtc->hw_mode.crtc_vdisplay +
                 (amdgpu_crtc->v_border * 2);
-            info->mode_info->vblank_time_us = vblank_lines * line_time_us;
-            info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
-            info->mode_info->ref_clock = adev->clock.spll.reference_freq;
-            info->mode_info++;
+            mode_info->vblank_time_us = vblank_lines * line_time_us;
+            mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+            mode_info->ref_clock = adev->clock.spll.reference_freq;
+            mode_info = NULL;
         }
     }
@@ -847,6 +850,16 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
     return 0;
 }
 
+static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
+{
+    CGS_FUNC_ADEV;
+
+    adev->pm.dpm_enabled = enabled;
+
+    return 0;
+}
+
 /** \brief evaluate acpi namespace object, handle or pathname must be valid
  * \param cgs_device
  * \param info input/output arguments for the control method
@@ -1097,6 +1110,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
     amdgpu_cgs_set_powergating_state,
     amdgpu_cgs_set_clockgating_state,
     amdgpu_cgs_get_active_displays_info,
+    amdgpu_cgs_notify_dpm_enabled,
     amdgpu_cgs_call_acpi_method,
     amdgpu_cgs_query_system_info,
 };
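Note on the pattern above: CGS routes driver services through a table of function pointers that a CGS_CALL-style macro dispatches on, so adding a service means one table slot plus one wrapper macro (see the cgs_common.h hunks further down). A minimal standalone C sketch of that dispatch; all names and values here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct dev_ctx {
    bool dpm_enabled;
};

typedef int (*notify_dpm_enabled_t)(void *dev, bool enabled);

struct ops {
    notify_dpm_enabled_t notify_dpm_enabled;
};

static int notify_dpm_enabled(void *dev, bool enabled)
{
    ((struct dev_ctx *)dev)->dpm_enabled = enabled;
    return 0;
}

static const struct ops dev_ops = {
    .notify_dpm_enabled = notify_dpm_enabled,
};

/* Dispatch through the table, like CGS_CALL; -1 if the slot is empty. */
#define CALL(op, dev, ...) (dev_ops.op ? dev_ops.op(dev, __VA_ARGS__) : -1)

int main(void)
{
    struct dev_ctx ctx = { .dpm_enabled = false };

    CALL(notify_dpm_enabled, &ctx, true);
    printf("dpm_enabled = %d\n", ctx.dpm_enabled);
    return 0;
}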
@@ -57,7 +57,7 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
     if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
         return true;
 
-    fence_put(*f);
+    fence_put(fence);
     return false;
 }
@@ -121,7 +121,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
 {
     struct amdgpu_device *adev = ring->adev;
     struct amdgpu_fence *fence;
-    struct fence **ptr;
+    struct fence *old, **ptr;
     uint32_t seq;
 
     fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -141,7 +141,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
     /* This function can't be called concurrently anyway, otherwise
      * emitting the fence would mess up the hardware ring buffer.
      */
-    BUG_ON(rcu_dereference_protected(*ptr, 1));
+    old = rcu_dereference_protected(*ptr, 1);
+    if (old && !fence_is_signaled(old)) {
+        DRM_INFO("rcu slot is busy\n");
+        fence_wait(old, false);
+    }
 
     rcu_assign_pointer(*ptr, fence_get(&fence->base));
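The hunk above replaces a hard assertion (BUG_ON) on a still-occupied fence slot with a wait on the old fence before installing the new one. A rough userspace model of that change; plain pointers stand in for the kernel's RCU-protected slot array, and fence_t is a made-up stand-in type:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct fence {
    bool signaled;
} fence_t;

static void fence_wait(fence_t *f)
{
    /* In the kernel this blocks; here we just force completion. */
    f->signaled = true;
}

static void emit_fence(fence_t **slot, fence_t *new_fence)
{
    fence_t *old = *slot;

    if (old && !old->signaled) {
        printf("slot is busy, waiting\n");
        fence_wait(old);
    }
    free(old);
    *slot = new_fence;      /* the slot now holds the new fence */
}

int main(void)
{
    fence_t *slot = NULL;
    fence_t *a = calloc(1, sizeof(*a));
    fence_t *b = calloc(1, sizeof(*b));

    emit_fence(&slot, a);   /* slot empty, installs a */
    emit_fence(&slot, b);   /* a not signaled yet: waits, then installs b */
    free(slot);
    return 0;
}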
@@ -219,6 +219,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
     if (r) {
         return r;
     }
+    adev->ddev->vblank_disable_allowed = true;
+
     /* enable msi */
     adev->irq.msi_enabled = false;
@@ -382,6 +382,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
         struct drm_amdgpu_info_vram_gtt vram_gtt;
 
         vram_gtt.vram_size = adev->mc.real_vram_size;
+        vram_gtt.vram_size -= adev->vram_pin_size;
         vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
         vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
         vram_gtt.gtt_size = adev->mc.gtt_size;
@@ -476,6 +476,17 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
     return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
 }
 
+static const char *amdgpu_vram_names[] = {
+    "UNKNOWN",
+    "GDDR1",
+    "DDR2",
+    "GDDR3",
+    "GDDR4",
+    "GDDR5",
+    "HBM",
+    "DDR3"
+};
+
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
     /* Add an MTRR for the VRAM */
@@ -484,8 +495,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
     DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
         adev->mc.mc_vram_size >> 20,
         (unsigned long long)adev->mc.aper_size >> 20);
-    DRM_INFO("RAM width %dbits DDR\n",
-            adev->mc.vram_width);
+    DRM_INFO("RAM width %dbits %s\n",
+         adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
     return amdgpu_ttm_init(adev);
 }
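The new amdgpu_vram_names[] table is indexed by adev->mc.vram_type (the AMDGPU_VRAM_TYPE_* values, where 0 is UNKNOWN). A tiny sketch of the lookup; the bounds guard here is editorial, not something the hunk adds:

#include <stdio.h>

static const char *vram_names[] = {
    "UNKNOWN", "GDDR1", "DDR2", "GDDR3", "GDDR4", "GDDR5", "HBM", "DDR3"
};

static const char *vram_name(unsigned int type)
{
    if (type >= sizeof(vram_names) / sizeof(vram_names[0]))
        type = 0;   /* fall back to UNKNOWN */
    return vram_names[type];
}

int main(void)
{
    printf("RAM width 4096bits %s\n", vram_name(6));  /* HBM, e.g. Fiji */
    return 0;
}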
@@ -143,7 +143,7 @@ static int amdgpu_pp_late_init(void *handle)
             adev->powerplay.pp_handle);
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-    if (adev->pp_enabled) {
+    if (adev->pp_enabled && adev->pm.dpm_enabled) {
         amdgpu_pm_sysfs_init(adev);
         amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
     }
@@ -161,12 +161,8 @@ static int amdgpu_pp_sw_init(void *handle)
             adev->powerplay.pp_handle);
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-    if (adev->pp_enabled) {
-        if (amdgpu_dpm == 0)
-            adev->pm.dpm_enabled = false;
-        else
-            adev->pm.dpm_enabled = true;
-    }
+    if (adev->pp_enabled)
+        adev->pm.dpm_enabled = true;
 #endif
 
     return ret;
@@ -241,32 +241,28 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-    struct amdgpu_ring *ring = &adev->uvd.ring;
-    int i, r;
+    unsigned size;
+    void *ptr;
+    int i;
 
     if (adev->uvd.vcpu_bo == NULL)
         return 0;
 
-    for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
-        uint32_t handle = atomic_read(&adev->uvd.handles[i]);
-        if (handle != 0) {
-            struct fence *fence;
+    for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+        if (atomic_read(&adev->uvd.handles[i]))
+            break;
 
-            amdgpu_uvd_note_usage(adev);
+    if (i == AMDGPU_MAX_UVD_HANDLES)
+        return 0;
 
-            r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence);
-            if (r) {
-                DRM_ERROR("Error destroying UVD (%d)!\n", r);
-                continue;
-            }
+    size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+    ptr = adev->uvd.cpu_addr;
 
-            fence_wait(fence, false);
-            fence_put(fence);
+    adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+    if (!adev->uvd.saved_bo)
+        return -ENOMEM;
 
-            adev->uvd.filp[i] = NULL;
-            atomic_set(&adev->uvd.handles[i], 0);
-        }
-    }
+    memcpy(adev->uvd.saved_bo, ptr, size);
 
     return 0;
 }
@@ -275,23 +271,29 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 {
     unsigned size;
     void *ptr;
-    const struct common_firmware_header *hdr;
-    unsigned offset;
 
     if (adev->uvd.vcpu_bo == NULL)
         return -EINVAL;
 
-    hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
-    offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-    memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
-        (adev->uvd.fw->size) - offset);
-
     size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-    size -= le32_to_cpu(hdr->ucode_size_bytes);
     ptr = adev->uvd.cpu_addr;
-    ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-    memset(ptr, 0, size);
+    if (adev->uvd.saved_bo != NULL) {
+        memcpy(ptr, adev->uvd.saved_bo, size);
+        kfree(adev->uvd.saved_bo);
+        adev->uvd.saved_bo = NULL;
+    } else {
+        const struct common_firmware_header *hdr;
+        unsigned offset;
+
+        hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+        offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+        memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
+            (adev->uvd.fw->size) - offset);
+        size -= le32_to_cpu(hdr->ucode_size_bytes);
+        ptr += le32_to_cpu(hdr->ucode_size_bytes);
+        memset(ptr, 0, size);
+    }
 
     return 0;
 }
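Taken together with the saved_bo field added to struct amdgpu_uvd above, the suspend path now snapshots the whole VCPU buffer when any decode handle is open, and the resume path prefers that snapshot over re-uploading firmware. A simplified userspace model of the flow; sizes and names are invented for illustration:

#include <stdlib.h>
#include <string.h>

#define VCPU_SIZE 4096
#define FW_SIZE   1024

static unsigned char vcpu_bo[VCPU_SIZE];   /* stands in for the UVD VCPU BO */
static unsigned char firmware[FW_SIZE];    /* stands in for adev->uvd.fw */
static unsigned char *saved_bo;

static int uvd_suspend(int handles_open)
{
    if (!handles_open)
        return 0;          /* nothing to preserve */
    saved_bo = malloc(VCPU_SIZE);
    if (!saved_bo)
        return -1;         /* -ENOMEM in the kernel */
    memcpy(saved_bo, vcpu_bo, VCPU_SIZE);
    return 0;
}

static void uvd_resume(void)
{
    if (saved_bo) {
        /* restore the saved context and drop the snapshot */
        memcpy(vcpu_bo, saved_bo, VCPU_SIZE);
        free(saved_bo);
        saved_bo = NULL;
    } else {
        /* cold start: reload firmware, clear the rest */
        memcpy(vcpu_bo, firmware, FW_SIZE);
        memset(vcpu_bo + FW_SIZE, 0, VCPU_SIZE - FW_SIZE);
    }
}

int main(void)
{
    if (uvd_suspend(1) == 0)
        uvd_resume();
    return 0;
}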
@@ -903,14 +903,6 @@ static int gmc_v7_0_early_init(void *handle)
     gmc_v7_0_set_gart_funcs(adev);
     gmc_v7_0_set_irq_funcs(adev);
 
-    if (adev->flags & AMD_IS_APU) {
-        adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
-    } else {
-        u32 tmp = RREG32(mmMC_SEQ_MISC0);
-        tmp &= MC_SEQ_MISC0__MT__MASK;
-        adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
-    }
-
     return 0;
 }
 
@@ -927,6 +919,14 @@ static int gmc_v7_0_sw_init(void *handle)
     int dma_bits;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    if (adev->flags & AMD_IS_APU) {
+        adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+    } else {
+        u32 tmp = RREG32(mmMC_SEQ_MISC0);
+        tmp &= MC_SEQ_MISC0__MT__MASK;
+        adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+    }
+
     r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
     if (r)
         return r;
@@ -863,14 +863,6 @@ static int gmc_v8_0_early_init(void *handle)
     gmc_v8_0_set_gart_funcs(adev);
     gmc_v8_0_set_irq_funcs(adev);
 
-    if (adev->flags & AMD_IS_APU) {
-        adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
-    } else {
-        u32 tmp = RREG32(mmMC_SEQ_MISC0);
-        tmp &= MC_SEQ_MISC0__MT__MASK;
-        adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
-    }
-
     return 0;
 }
 
@@ -881,12 +873,27 @@ static int gmc_v8_0_late_init(void *handle)
     return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
 }
 
+#define mmMC_SEQ_MISC0_FIJI 0xA71
+
 static int gmc_v8_0_sw_init(void *handle)
 {
     int r;
     int dma_bits;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    if (adev->flags & AMD_IS_APU) {
+        adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+    } else {
+        u32 tmp;
+
+        if (adev->asic_type == CHIP_FIJI)
+            tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
+        else
+            tmp = RREG32(mmMC_SEQ_MISC0);
+        tmp &= MC_SEQ_MISC0__MT__MASK;
+        adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+    }
+
     r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
     if (r)
         return r;
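Both gmc hunks move the VRAM-type probe from early_init to sw_init and, on Fiji, read MC_SEQ_MISC0 at a different offset. A standalone sketch of the selection logic; the register read is stubbed, and the non-Fiji offset value here is illustrative (only 0xA71 comes from the diff above):

#include <stdint.h>
#include <stdio.h>

#define mmMC_SEQ_MISC0         0xA80   /* illustrative offset */
#define mmMC_SEQ_MISC0_FIJI    0xA71   /* from the diff above */
#define MC_SEQ_MISC0__MT__MASK 0xF0000000u

enum asic { CHIP_TONGA, CHIP_FIJI };

static uint32_t RREG32(uint32_t reg)
{
    /* stub: a real driver reads the MMIO register here */
    return reg == mmMC_SEQ_MISC0_FIJI ? 0x50000000u : 0x30000000u;
}

static uint32_t read_vram_type(enum asic type)
{
    uint32_t tmp;

    if (type == CHIP_FIJI)
        tmp = RREG32(mmMC_SEQ_MISC0_FIJI);  /* Fiji's MT field lives elsewhere */
    else
        tmp = RREG32(mmMC_SEQ_MISC0);
    return tmp & MC_SEQ_MISC0__MT__MASK;
}

int main(void)
{
    printf("fiji MT field: 0x%08x\n", read_vram_type(CHIP_FIJI));
    return 0;
}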
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
     int r;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-    r = amdgpu_uvd_suspend(adev);
+    r = uvd_v4_2_hw_fini(adev);
     if (r)
         return r;
 
-    r = uvd_v4_2_hw_fini(adev);
+    r = amdgpu_uvd_suspend(adev);
     if (r)
         return r;
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
     int r;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-    r = amdgpu_uvd_suspend(adev);
+    r = uvd_v5_0_hw_fini(adev);
     if (r)
         return r;
 
-    r = uvd_v5_0_hw_fini(adev);
+    r = amdgpu_uvd_suspend(adev);
     if (r)
         return r;
@@ -214,15 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
     int r;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    r = uvd_v6_0_hw_fini(adev);
+    if (r)
+        return r;
+
     /* Skip this for APU for now */
     if (!(adev->flags & AMD_IS_APU)) {
         r = amdgpu_uvd_suspend(adev);
         if (r)
             return r;
     }
-    r = uvd_v6_0_hw_fini(adev);
-    if (r)
-        return r;
 
     return r;
 }
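All three UVD suspend hunks make the same ordering change: quiesce the engine with hw_fini first, then save the context, rather than the other way around. Distilled into a stub sketch (both functions are stand-ins):

static int hw_fini(void)      { return 0; }  /* stop the UVD block */
static int save_context(void) { return 0; }  /* snapshot the VCPU buffer */

static int suspend(void)
{
    int r;

    r = hw_fini();          /* first stop the hardware... */
    if (r)
        return r;
    return save_context();  /* ...then save its state */
}

int main(void)
{
    return suspend();
}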
@@ -589,6 +589,8 @@ typedef int(*cgs_get_active_displays_info)(
                     void *cgs_device,
                     struct cgs_display_info *info);
 
+typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
+
 typedef int (*cgs_call_acpi_method)(void *cgs_device,
                     uint32_t acpi_method,
                     uint32_t acpi_function,
@@ -644,6 +646,8 @@ struct cgs_ops {
     cgs_set_clockgating_state set_clockgating_state;
     /* display manager */
     cgs_get_active_displays_info get_active_displays_info;
+    /* notify dpm enabled */
+    cgs_notify_dpm_enabled notify_dpm_enabled;
     /* ACPI */
     cgs_call_acpi_method call_acpi_method;
     /* get system info */
@@ -734,8 +738,12 @@ struct cgs_device
     CGS_CALL(set_powergating_state, dev, block_type, state)
 #define cgs_set_clockgating_state(dev, block_type, state) \
     CGS_CALL(set_clockgating_state, dev, block_type, state)
+#define cgs_notify_dpm_enabled(dev, enabled) \
+    CGS_CALL(notify_dpm_enabled, dev, enabled)
+
 #define cgs_get_active_displays_info(dev, info) \
     CGS_CALL(get_active_displays_info, dev, info)
+
 #define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \
     CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
 #define cgs_query_system_info(dev, sys_info) \
@@ -137,14 +137,14 @@ static const pem_event_action *resume_event[] = {
     reset_display_configCounter_tasks,
     update_dal_configuration_tasks,
     vari_bright_resume_tasks,
-    block_adjust_power_state_tasks,
     setup_asic_tasks,
     enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
     enable_dynamic_state_management_tasks,
     enable_clock_power_gatings_tasks,
     enable_disable_bapm_tasks,
     initialize_thermal_controller_tasks,
-    reset_boot_state_tasks,
+    get_2d_performance_state_tasks,
+    set_performance_state_tasks,
     adjust_power_state_tasks,
     enable_disable_fps_tasks,
     notify_hw_power_source_tasks,
@@ -2389,6 +2389,7 @@ static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
 
     for(count = 0; count < table->VceLevelCount; count++) {
         table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+        table->VceLevel[count].MinVoltage = 0;
         table->VceLevel[count].MinVoltage |=
                 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
         table->VceLevel[count].MinVoltage |=
@@ -2465,6 +2466,7 @@ static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
 
     for (count = 0; count < table->SamuLevelCount; count++) {
         /* not sure whether we need evclk or not */
+        table->SamuLevel[count].MinVoltage = 0;
         table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
         table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
                 VOLTAGE_SCALE) << VDDC_SHIFT;
@@ -2562,6 +2564,7 @@ static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
     table->UvdBootLevel = 0;
 
     for (count = 0; count < table->UvdLevelCount; count++) {
+        table->UvdLevel[count].MinVoltage = 0;
         table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
         table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
         table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
@@ -2900,6 +2903,8 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
     if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
         fiji_populate_smc_voltage_tables(hwmgr, table);
 
+    table->SystemFlags = 0;
+
     if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
             PHM_PlatformCaps_AutomaticDCTransition))
         table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
@@ -2997,6 +3002,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
     table->MemoryThermThrottleEnable = 1;
     table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/
     table->PCIeGenInterval = 1;
+    table->VRConfig = 0;
 
     result = fiji_populate_vr_config(hwmgr, table);
     PP_ASSERT_WITH_CODE(0 == result,
@@ -5195,6 +5201,67 @@ static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
     return size;
 }
 
+static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
+                                               const struct fiji_performance_level *pl2)
+{
+    return ((pl1->memory_clock == pl2->memory_clock) &&
+            (pl1->engine_clock == pl2->engine_clock) &&
+            (pl1->pcie_gen == pl2->pcie_gen) &&
+            (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+    const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
+    const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
+    int i;
+
+    if (equal == NULL || psa == NULL || psb == NULL)
+        return -EINVAL;
+
+    /* If the two states don't even have the same number of performance levels they cannot be the same state. */
+    if (psa->performance_level_count != psb->performance_level_count) {
+        *equal = false;
+        return 0;
+    }
+
+    for (i = 0; i < psa->performance_level_count; i++) {
+        if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+            /* If we have found even one performance level pair that is different the states are different. */
+            *equal = false;
+            return 0;
+        }
+    }
+
+    /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+    *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+    *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+    *equal &= (psa->sclk_threshold == psb->sclk_threshold);
+    *equal &= (psa->acp_clk == psb->acp_clk);
+
+    return 0;
+}
+
+bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+    struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+    bool is_update_required = false;
+    struct cgs_display_info info = {0, 0, NULL};
+
+    cgs_get_active_displays_info(hwmgr->device, &info);
+
+    if (data->display_timing.num_existing_displays != info.display_count)
+        is_update_required = true;
+/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
+    if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+        cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
+        if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
+            is_update_required = true;
+*/
+    return is_update_required;
+}
+
+
 static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
     .backend_init = &fiji_hwmgr_backend_init,
     .backend_fini = &tonga_hwmgr_backend_fini,
@@ -5230,6 +5297,8 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
     .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
     .set_fan_control_mode = fiji_set_fan_control_mode,
     .get_fan_control_mode = fiji_get_fan_control_mode,
+    .check_states_equal = fiji_check_states_equal,
+    .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
     .get_pp_table = fiji_get_pp_table,
     .set_pp_table = fiji_set_pp_table,
     .force_clock_level = fiji_force_clock_level,
@@ -58,6 +58,9 @@ void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
 
     phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
 
+    phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+    phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
     if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
         acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
@@ -130,18 +133,25 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
 
 int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 {
+    int ret = 1;
+    bool enabled;
     PHM_FUNC_CHECK(hwmgr);
 
     if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
         PHM_PlatformCaps_TablelessHardwareInterface)) {
         if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
-            return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
+            ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
     } else {
-        return phm_dispatch_table(hwmgr,
+        ret = phm_dispatch_table(hwmgr,
                 &(hwmgr->enable_dynamic_state_management),
                 NULL, NULL);
     }
-    return 0;
+
+    enabled = ret == 0 ? true : false;
+
+    cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+    return ret;
 }
 
 int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
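The phm change above converts two early returns into a single exit so that the DPM-enabled notification always fires with the real outcome. A distilled sketch of that control-flow change; these are stubs, not the kernel API:

#include <stdbool.h>

static int  backend_enable(void)            { return 0; }
static void notify_dpm_enabled(bool enabled) { (void)enabled; }

static int enable_dynamic_state_management(bool tableless)
{
    int ret = 1;

    if (tableless)
        ret = backend_enable();
    else
        ret = 0;    /* stands in for phm_dispatch_table() */

    notify_dpm_enabled(ret == 0);   /* runs on every path now */
    return ret;
}

int main(void)
{
    return enable_dynamic_state_management(true);
}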
@@ -57,14 +57,13 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
         DRM_ERROR("failed to map control registers area\n");
         ret = PTR_ERR(hdlcd->mmio);
         hdlcd->mmio = NULL;
-        goto fail;
+        return ret;
     }
 
     version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
     if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
         DRM_ERROR("unknown product id: 0x%x\n", version);
-        ret = -EINVAL;
-        goto fail;
+        return -EINVAL;
     }
     DRM_INFO("found ARM HDLCD version r%dp%d\n",
         (version & HDLCD_VERSION_MAJOR_MASK) >> 8,
@@ -73,7 +72,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
     /* Get the optional framebuffer memory resource */
     ret = of_reserved_mem_device_init(drm->dev);
     if (ret && ret != -ENODEV)
-        goto fail;
+        return ret;
 
     ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
     if (ret)
@@ -101,8 +100,6 @@ irq_fail:
     drm_crtc_cleanup(&hdlcd->crtc);
 setup_fail:
     of_reserved_mem_device_release(drm->dev);
-fail:
-    devm_clk_put(drm->dev, hdlcd->clk);
 
     return ret;
 }
@@ -412,7 +409,6 @@ err_unload:
     pm_runtime_put_sync(drm->dev);
     pm_runtime_disable(drm->dev);
     of_reserved_mem_device_release(drm->dev);
-    devm_clk_put(dev, hdlcd->clk);
 err_free:
     drm_dev_unref(drm);
 
@@ -436,10 +432,6 @@ static void hdlcd_drm_unbind(struct device *dev)
     pm_runtime_put_sync(drm->dev);
     pm_runtime_disable(drm->dev);
     of_reserved_mem_device_release(drm->dev);
-    if (!IS_ERR(hdlcd->clk)) {
-        devm_clk_put(drm->dev, hdlcd->clk);
-        hdlcd->clk = NULL;
-    }
     drm_mode_config_cleanup(drm);
     drm_dev_unregister(drm);
     drm_dev_unref(drm);
@@ -225,8 +225,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
     if (!iores)
         return -ENXIO;
 
-    platform_set_drvdata(pdev, hdmi);
-
     encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
     /*
      * If we failed to find the CRTC(s) which this encoder is
@@ -245,7 +243,16 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
     drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
              DRM_MODE_ENCODER_TMDS, NULL);
 
-    return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+    ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+    /*
+     * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+     * which would have called the encoder cleanup. Do it manually.
+     */
+    if (ret)
+        drm_encoder_cleanup(encoder);
+
+    return ret;
 }
 
 static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
@@ -326,7 +326,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
 {
     struct imx_drm_device *imxdrm = drm->dev_private;
     struct imx_drm_crtc *imx_drm_crtc;
-    int ret;
 
     /*
      * The vblank arrays are dimensioned by MAX_CRTC - we can't
@@ -351,10 +350,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
 
     *new_crtc = imx_drm_crtc;
 
-    ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
-    if (ret)
-        goto err_register;
-
     drm_crtc_helper_add(crtc,
             imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
 
@@ -362,11 +357,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
             imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
 
     return 0;
-
-err_register:
-    imxdrm->crtc[--imxdrm->pipes] = NULL;
-    kfree(imx_drm_crtc);
-    return ret;
 }
 EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
@@ -72,22 +72,101 @@ static inline int calc_bandwidth(int width, int height, unsigned int vref)
 int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
                int x, int y)
 {
-    struct drm_gem_cma_object *cma_obj;
-    unsigned long eba;
-    int active;
+    struct drm_gem_cma_object *cma_obj[3];
+    unsigned long eba, ubo, vbo;
+    int active, i;
 
-    cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-    if (!cma_obj) {
-        DRM_DEBUG_KMS("entry is null.\n");
-        return -EFAULT;
+    for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+        cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
+        if (!cma_obj[i]) {
+            DRM_DEBUG_KMS("plane %d entry is null.\n", i);
+            return -EFAULT;
+        }
     }
 
-    dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
-        &cma_obj->paddr, x, y);
-
-    eba = cma_obj->paddr + fb->offsets[0] +
+    eba = cma_obj[0]->paddr + fb->offsets[0] +
           fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
 
+    if (eba & 0x7) {
+        DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
+        return -EINVAL;
+    }
+
+    if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
+        DRM_DEBUG_KMS("pitches out of range.\n");
+        return -EINVAL;
+    }
+
+    if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
+        DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
+        return -EINVAL;
+    }
+
+    ipu_plane->stride[0] = fb->pitches[0];
+
+    switch (fb->pixel_format) {
+    case DRM_FORMAT_YUV420:
+    case DRM_FORMAT_YVU420:
+        /*
+         * Multiplanar formats have to meet the following restrictions:
+         * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+         * - EBA, UBO and VBO are a multiple of 8
+         * - UBO and VBO are unsigned and not larger than 0xfffff8
+         * - Only EBA may be changed while scanout is active
+         * - The strides of U and V planes must be identical.
+         */
+        ubo = cma_obj[1]->paddr + fb->offsets[1] +
+              fb->pitches[1] * y / 2 + x / 2 - eba;
+        vbo = cma_obj[2]->paddr + fb->offsets[2] +
+              fb->pitches[2] * y / 2 + x / 2 - eba;
+
+        if ((ubo & 0x7) || (vbo & 0x7)) {
+            DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
+            return -EINVAL;
+        }
+
+        if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
+            DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
+            return -EINVAL;
+        }
+
+        if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
+                                   (ipu_plane->v_offset != vbo))) {
+            DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
+            return -EINVAL;
+        }
+
+        if (fb->pitches[1] != fb->pitches[2]) {
+            DRM_DEBUG_KMS("U/V pitches must be identical.\n");
+            return -EINVAL;
+        }
+
+        if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
+            DRM_DEBUG_KMS("U/V pitches out of range.\n");
+            return -EINVAL;
+        }
+
+        if (ipu_plane->enabled &&
+            (ipu_plane->stride[1] != fb->pitches[1])) {
+            DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
+            return -EINVAL;
+        }
+
+        ipu_plane->u_offset = ubo;
+        ipu_plane->v_offset = vbo;
+        ipu_plane->stride[1] = fb->pitches[1];
+
+        dev_dbg(ipu_plane->base.dev->dev,
+            "phys = %pad %pad %pad, x = %d, y = %d",
+            &cma_obj[0]->paddr, &cma_obj[1]->paddr,
+            &cma_obj[2]->paddr, x, y);
+        break;
+    default:
+        dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
+            &cma_obj[0]->paddr, x, y);
+        break;
+    }
+
     if (ipu_plane->enabled) {
         active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
         ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
@@ -201,12 +280,6 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
         }
     }
 
-    ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
-    if (ret) {
-        dev_err(dev, "initializing dmfc channel failed with %d\n", ret);
-        return ret;
-    }
-
     ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
             calc_bandwidth(crtc_w, crtc_h,
                        calc_vref(mode)), 64);
@@ -215,6 +288,8 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
         return ret;
     }
 
+    ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
+
     ipu_cpmem_zero(ipu_plane->ipu_ch);
     ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
     ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
@@ -233,6 +308,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
     if (interlaced)
         ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
 
+    if (fb->pixel_format == DRM_FORMAT_YUV420) {
+        ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+                          ipu_plane->stride[1],
+                          ipu_plane->u_offset,
+                          ipu_plane->v_offset);
+    } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
+        ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+                          ipu_plane->stride[1],
+                          ipu_plane->v_offset,
+                          ipu_plane->u_offset);
+    }
+
     ipu_plane->w = src_w;
     ipu_plane->h = src_h;
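The plane checks above boil down to arithmetic on the three plane addresses: UBO/VBO are the distances from the luma base (EBA) to the chroma planes, and the IPU needs them 8-byte aligned and at most 0xfffff8. A standalone sketch with invented addresses:

#include <stdint.h>
#include <stdio.h>

static int check_offsets(uint32_t eba, uint32_t u_plane, uint32_t v_plane)
{
    uint32_t ubo = u_plane - eba;   /* U buffer offset from EBA */
    uint32_t vbo = v_plane - eba;   /* V buffer offset from EBA */

    if ((ubo & 0x7) || (vbo & 0x7)) {
        fprintf(stderr, "U/V offsets must be a multiple of 8\n");
        return -1;
    }
    if (ubo > 0xfffff8 || vbo > 0xfffff8) {
        fprintf(stderr, "U/V offsets too large\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    /* 1920x1080 YUV420: U plane follows Y, V plane follows U */
    uint32_t eba = 0x10000000;
    uint32_t u = eba + 1920 * 1080;
    uint32_t v = u + (1920 / 2) * (1080 / 2);

    return check_offsets(eba, u, v) ? 1 : 0;
}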
@@ -29,6 +29,10 @@ struct ipu_plane {
     int          w;
     int          h;
 
+    unsigned int u_offset;
+    unsigned int v_offset;
+    unsigned int stride[2];
+
     bool         enabled;
 };
@@ -11,6 +11,7 @@ struct nvkm_device_tegra {
 
     struct reset_control *rst;
     struct clk *clk;
+    struct clk *clk_ref;
    struct clk *clk_pwr;
 
     struct regulator *vdd;
@@ -36,6 +37,10 @@ struct nvkm_device_tegra_func {
      * bypassed). A value of 0 means an IOMMU is never used.
      */
     u8 iommu_bit;
+    /*
+     * Whether the chip requires a reference clock
+     */
+    bool require_ref_clk;
 };
 
 int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
@@ -55,6 +55,11 @@ static const struct nvkm_device_tegra_func gk20a_platform_data = {
     .iommu_bit = 34,
 };
 
+static const struct nvkm_device_tegra_func gm20b_platform_data = {
+    .iommu_bit = 34,
+    .require_ref_clk = true,
+};
+
 static const struct of_device_id nouveau_platform_match[] = {
     {
         .compatible = "nvidia,gk20a",
@@ -62,7 +67,7 @@ static const struct of_device_id nouveau_platform_match[] = {
     },
     {
         .compatible = "nvidia,gm20b",
-        .data = &gk20a_platform_data,
+        .data = &gm20b_platform_data,
     },
     { }
 };
@@ -35,6 +35,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
     ret = clk_prepare_enable(tdev->clk);
     if (ret)
         goto err_clk;
+    if (tdev->clk_ref) {
+        ret = clk_prepare_enable(tdev->clk_ref);
+        if (ret)
+            goto err_clk_ref;
+    }
     ret = clk_prepare_enable(tdev->clk_pwr);
     if (ret)
         goto err_clk_pwr;
@@ -57,6 +62,9 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
 err_clamp:
     clk_disable_unprepare(tdev->clk_pwr);
 err_clk_pwr:
+    if (tdev->clk_ref)
+        clk_disable_unprepare(tdev->clk_ref);
+err_clk_ref:
     clk_disable_unprepare(tdev->clk);
 err_clk:
     regulator_disable(tdev->vdd);
@@ -71,6 +79,8 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
     udelay(10);
 
     clk_disable_unprepare(tdev->clk_pwr);
+    if (tdev->clk_ref)
+        clk_disable_unprepare(tdev->clk_ref);
     clk_disable_unprepare(tdev->clk);
     udelay(10);
 
@@ -274,6 +284,13 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
         goto free;
     }
 
+    if (func->require_ref_clk)
+        tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
+    if (IS_ERR(tdev->clk_ref)) {
+        ret = PTR_ERR(tdev->clk_ref);
+        goto free;
+    }
+
     tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
     if (IS_ERR(tdev->clk_pwr)) {
         ret = PTR_ERR(tdev->clk_pwr);
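The nouveau hunks make the Tegra reference clock optional: it is only acquired when the platform data asks for it, and the enable order and the error unwind mirror each other. A stub sketch of that shape; the clk functions here are stand-ins, not the kernel clk API:

#include <stddef.h>

struct clk { int dummy; };

static int  clk_prepare_enable(struct clk *c)    { (void)c; return 0; }
static void clk_disable_unprepare(struct clk *c) { (void)c; }

static int power_up(struct clk *clk, struct clk *clk_ref, struct clk *clk_pwr)
{
    int ret;

    ret = clk_prepare_enable(clk);
    if (ret)
        return ret;
    if (clk_ref) {              /* only present on chips that need it */
        ret = clk_prepare_enable(clk_ref);
        if (ret)
            goto err_clk_ref;
    }
    ret = clk_prepare_enable(clk_pwr);
    if (ret)
        goto err_clk_pwr;
    return 0;

err_clk_pwr:
    if (clk_ref)
        clk_disable_unprepare(clk_ref);
err_clk_ref:
    clk_disable_unprepare(clk);
    return ret;
}

int main(void)
{
    struct clk c = {0}, pwr = {0};

    return power_up(&c, NULL, &pwr);    /* chip without a ref clock */
}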
@@ -275,13 +275,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
         if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
             atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
         atombios_blank_crtc(crtc, ATOM_DISABLE);
-        drm_vblank_on(dev, radeon_crtc->crtc_id);
+        if (dev->num_crtcs > radeon_crtc->crtc_id)
+            drm_vblank_on(dev, radeon_crtc->crtc_id);
         radeon_crtc_load_lut(crtc);
         break;
     case DRM_MODE_DPMS_STANDBY:
     case DRM_MODE_DPMS_SUSPEND:
     case DRM_MODE_DPMS_OFF:
-        drm_vblank_off(dev, radeon_crtc->crtc_id);
+        if (dev->num_crtcs > radeon_crtc->crtc_id)
+            drm_vblank_off(dev, radeon_crtc->crtc_id);
         if (radeon_crtc->enabled)
             atombios_blank_crtc(crtc, ATOM_ENABLE);
         if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -291,6 +291,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
     if (r) {
         return r;
     }
+    rdev->ddev->vblank_disable_allowed = true;
+
     /* enable msi */
     rdev->msi_enabled = 0;
@@ -331,13 +331,15 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
                              RADEON_CRTC_DISP_REQ_EN_B));
             WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
         }
-        drm_vblank_on(dev, radeon_crtc->crtc_id);
+        if (dev->num_crtcs > radeon_crtc->crtc_id)
+            drm_vblank_on(dev, radeon_crtc->crtc_id);
         radeon_crtc_load_lut(crtc);
         break;
     case DRM_MODE_DPMS_STANDBY:
     case DRM_MODE_DPMS_SUSPEND:
     case DRM_MODE_DPMS_OFF:
-        drm_vblank_off(dev, radeon_crtc->crtc_id);
+        if (dev->num_crtcs > radeon_crtc->crtc_id)
+            drm_vblank_off(dev, radeon_crtc->crtc_id);
         if (radeon_crtc->crtc_id)
             WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
         else {
@@ -395,60 +395,48 @@ void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
 
 void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
-                   u32 pixel_format, int stride,
-                   int u_offset, int v_offset)
+                   unsigned int uv_stride,
+                   unsigned int u_offset, unsigned int v_offset)
 {
-    switch (pixel_format) {
-    case V4L2_PIX_FMT_YUV420:
-    case V4L2_PIX_FMT_YUV422P:
-        ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
-        ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
-        ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
-        break;
-    case V4L2_PIX_FMT_YVU420:
-        ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
-        ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
-        ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
-        break;
-    case V4L2_PIX_FMT_NV12:
-    case V4L2_PIX_FMT_NV16:
-        ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
-        ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
-        ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
-        break;
-    }
+    ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
+    ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
+    ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
 }
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
 
 void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
                   u32 pixel_format, int stride, int height)
 {
-    int u_offset, v_offset;
+    int fourcc, u_offset, v_offset;
     int uv_stride = 0;
 
-    switch (pixel_format) {
-    case V4L2_PIX_FMT_YUV420:
-    case V4L2_PIX_FMT_YVU420:
+    fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
+    switch (fourcc) {
+    case DRM_FORMAT_YUV420:
         uv_stride = stride / 2;
         u_offset = stride * height;
         v_offset = u_offset + (uv_stride * height / 2);
-        ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
-                          u_offset, v_offset);
         break;
-    case V4L2_PIX_FMT_YUV422P:
+    case DRM_FORMAT_YVU420:
+        uv_stride = stride / 2;
+        v_offset = stride * height;
+        u_offset = v_offset + (uv_stride * height / 2);
+        break;
+    case DRM_FORMAT_YUV422:
+        uv_stride = stride / 2;
         u_offset = stride * height;
         v_offset = u_offset + (uv_stride * height);
-        ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
-                          u_offset, v_offset);
         break;
-    case V4L2_PIX_FMT_NV12:
-    case V4L2_PIX_FMT_NV16:
+    case DRM_FORMAT_NV12:
+    case DRM_FORMAT_NV16:
+        uv_stride = stride;
         u_offset = stride * height;
-        ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
-                          u_offset, 0);
+        v_offset = 0;
         break;
+    default:
+        return;
     }
+    ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
 }
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
 
@@ -684,6 +672,15 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
 
     switch (pix->pixelformat) {
     case V4L2_PIX_FMT_YUV420:
+        offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+        u_offset = U_OFFSET(pix, image->rect.left,
+                    image->rect.top) - offset;
+        v_offset = V_OFFSET(pix, image->rect.left,
+                    image->rect.top) - offset;
+
+        ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
+                          u_offset, v_offset);
+        break;
     case V4L2_PIX_FMT_YVU420:
         offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
         u_offset = U_OFFSET(pix, image->rect.left,
@@ -691,9 +688,8 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
         v_offset = V_OFFSET(pix, image->rect.left,
                     image->rect.top) - offset;
 
-        ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-                          pix->bytesperline,
-                          u_offset, v_offset);
+        ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
+                          v_offset, u_offset);
         break;
     case V4L2_PIX_FMT_YUV422P:
         offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
@@ -702,8 +698,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
         v_offset = V2_OFFSET(pix, image->rect.left,
                      image->rect.top) - offset;
 
-        ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-                          pix->bytesperline,
+        ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
                           u_offset, v_offset);
         break;
     case V4L2_PIX_FMT_NV12:
@@ -712,8 +707,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
                   image->rect.top) - offset;
         v_offset = 0;
 
-        ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-                          pix->bytesperline,
+        ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
                           u_offset, v_offset);
         break;
     case V4L2_PIX_FMT_NV16:
@@ -722,8 +716,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
                   image->rect.top) - offset;
         v_offset = 0;
 
-        ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-                          pix->bytesperline,
+        ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
                           u_offset, v_offset);
         break;
     case V4L2_PIX_FMT_UYVY:
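The rewritten ipu_cpmem_set_yuv_planar() derives the chroma stride and the U/V plane offsets per format before calling the _full variant once. The arithmetic, pulled out into a standalone sketch for two of the layouts:

#include <stdio.h>

struct planar { int uv_stride, u_offset, v_offset; };

/* YUV420: half-width, half-height chroma planes, U before V */
static struct planar yuv420_layout(int stride, int height)
{
    struct planar p;

    p.uv_stride = stride / 2;
    p.u_offset = stride * height;
    p.v_offset = p.u_offset + (p.uv_stride * height / 2);
    return p;
}

/* NV12/NV16: interleaved UV plane, full stride, single offset */
static struct planar nv12_layout(int stride, int height)
{
    struct planar p = { stride, stride * height, 0 };
    return p;
}

int main(void)
{
    struct planar p = yuv420_layout(1920, 1080);

    printf("uv_stride=%d u=%d v=%d\n", p.uv_stride, p.u_offset, p.v_offset);
    p = nv12_layout(1920, 1080);
    printf("uv_stride=%d u=%d v=%d\n", p.uv_stride, p.u_offset, p.v_offset);
    return 0;
}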
@@ -350,11 +350,13 @@ out:
 }
 EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
 
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
+void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
 {
     struct ipu_dmfc_priv *priv = dmfc->priv;
     u32 dmfc_gen1;
 
+    mutex_lock(&priv->mutex);
+
     dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
 
     if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
@@ -364,9 +366,9 @@ int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
 
     writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
 
-    return 0;
+    mutex_unlock(&priv->mutex);
 }
-EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel);
+EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
 
 struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
 {
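Unlike the old ipu_dmfc_init_channel(), the new ipu_dmfc_config_wait4eot() wraps its read-modify-write of DMFC_GENERAL1 in the DMFC mutex. A minimal pthread sketch of the same locked RMW pattern; the bit position is illustrative:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t dmfc_general1;  /* stands in for the DMFC_GENERAL1 register */

static void config_wait4eot(int set_bit)
{
    pthread_mutex_lock(&lock);
    if (set_bit)
        dmfc_general1 |= 1u << 20;      /* illustrative bit */
    else
        dmfc_general1 &= ~(1u << 20);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    config_wait4eot(1);
    return 0;
}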
@@ -92,7 +92,7 @@ struct ttm_placement {
  */
 struct ttm_bus_placement {
     void          *addr;
-    unsigned long base;
+    phys_addr_t   base;
     unsigned long size;
     unsigned long offset;
     bool          is_iomem;
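Widening ttm_bus_placement.base to phys_addr_t matters on 32-bit kernels with PAE/LPAE, where a physical BAR address can exceed what unsigned long holds. A small demonstration of the truncation hazard:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t phys = 0x100000000ULL;     /* a BAR above 4 GiB */
    unsigned long narrow = (unsigned long)phys;

    if (sizeof(unsigned long) == 4 && narrow != phys)
        printf("unsigned long truncated 0x%llx to 0x%lx\n",
               (unsigned long long)phys, narrow);
    else
        printf("no truncation on this ABI\n");
    return 0;
}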
@@ -194,8 +194,9 @@ int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
 int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width);
 void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format);
 void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
-                   u32 pixel_format, int stride,
-                   int u_offset, int v_offset);
+                   unsigned int uv_stride,
+                   unsigned int u_offset,
+                   unsigned int v_offset);
 void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
                   u32 pixel_format, int stride, int height);
 int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
@@ -236,7 +237,7 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc);
 int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
                  unsigned long bandwidth_mbs, int burstsize);
 void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc);
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width);
+void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width);
 struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel);
 void ipu_dmfc_put(struct dmfc_channel *dmfc);