Mirror of https://github.com/torvalds/linux.git, synced 2024-11-26 22:21:42 +00:00
msm, i915, amdgpu, qxl, virtio-gpu, sun4i fixes
-----BEGIN PGP SIGNATURE-----
iQIcBAABAgAGBQJa4raGAAoJEAx081l5xIa++xYP/jtRaiJDwYd87aq+FYyu4YT8
CFIezkxaYLUMwhRtLmA51K31pe/3uSytYaJAScRXM/BtkHbWS7Pt3Cbx66+iwKC1
O7CatWI1iPmnvfhuErZVO6dow40DdjZyJ5skTSVXfKlKZ1NOlJGwQMUZnm/krmka
6grOFxqsyl984NgubnpMsi64TW9FtM8PZE7MJm+x+2X6MV2deq9w769u8rorS7/Y
Uq2PkiCVrctf8+tQF+UpPcrXdyuonTzuT/qvG2/1VhP7bGi9mH/3SCepZYpCUQup
LlDYM76Of9y/FQUsyCGhiWdqt79wjEZt3EoJjU5SeeUEGWqiJX5b32b4PFUeh1Xq
F3o1lFm27ZCTBb9X7mfx1XDzPC7DqqNnOKAJGgXovZa+mT2IsgUqOjcfGXOU0rTO
IKgfN7vyt+S3vZrg4aPu4yg1CTXmOrY81OQTpAaDeo4CQ/LsRapQaQRBhcYCm5pw
3D25hzcpPMdC2QrXa7R5kDiqWidR/TFCX26H8DA673qdawWdzPhlrw1gv9hJwMB/
5sCpPb/3pwXQBQ/TrUoY1oLBEGh00rZ7lYaQc1I+CJ4V9nTbqcZ+WzMI8IMV8UlX
5QGkNfc9bIsOyVDBkFasyl1TYTFCGKXsKn583pCVsP/oGTPPmBbQAJB8r5L+HyWW
U3R224YdF9Pv+XBqm2KW
=hboJ
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.17-rc3' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Pretty run of the mill for this stage in the cycle: msm, i915, amdgpu,
  qxl, virtio-gpu, sun4i fixes.

  i915:
   - Black screen fixes
   - Display w/a fix
   - HDA codec interop fix

  sun4i:
   - tbsa711 tablet regression fix

  qxl:
   - Regression fixes due to changes in TTM

  virtio:
   - Fix wait event condition

  msm:
   - DSI display fixes

  amdgpu:
   - fix hang on Carrizo
   - DP MST hang fixes
   - irq handling deadlock in DC

  amdkfd:
   - Fix Kconfig issue
   - Clock retrieval fix
   - Sparse fixes"

* tag 'drm-fixes-for-v4.17-rc3' of git://people.freedesktop.org/~airlied/linux: (27 commits)
  drm/edid: Reset more of the display info
  drm/virtio: fix vq wait_event condition
  qxl: keep separate release_bo pointer
  qxl: fix qxl_release_{map,unmap}
  Revert "drm/sun4i: add lvds mode_valid function"
  drm/amd/display: Check dc_sink every time in MST hotplug
  drm/amd/display: Update MST edid property every time
  drm/amd/display: Don't read EDID in atomic_check
  drm/amd/display: Disallow enabling CRTC without primary plane with FB
  drm/amd/display: Fix deadlock when flushing irq
  drm/i915/fbdev: Enable late fbdev initial configuration
  drm/i915: Use ktime on wait_for
  drm/amdgpu: set COMPUTE_PGM_RSRC1 for SGPR/VGPR clearing shaders
  drm/amdkfd: fix build, select MMU_NOTIFIER
  drm/amdkfd: fix clock counter retrieval for node without GPU
  drm/amdkfd: Fix the error return code in kfd_ioctl_unmap_memory_from_gpu()
  drm/amdkfd: kfd_dev_is_large_bar() can be static
  drm/i915: Enable display WA#1183 from its correct spot
  drm/i915/audio: set minimum CD clock to twice the BCLK
  drm/msm: don't deref error pointer in the msm_fbdev_create error path
  ...
commit 0f940fac65
@@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] =
 static const u32 vgpr_init_regs[] =
 {
         mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
-        mmCOMPUTE_RESOURCE_LIMITS, 0,
+        mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
         mmCOMPUTE_NUM_THREAD_X, 256*4,
         mmCOMPUTE_NUM_THREAD_Y, 1,
         mmCOMPUTE_NUM_THREAD_Z, 1,
+        mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
         mmCOMPUTE_PGM_RSRC2, 20,
         mmCOMPUTE_USER_DATA_0, 0xedcedc00,
         mmCOMPUTE_USER_DATA_1, 0xedcedc01,

@@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] =
 static const u32 sgpr1_init_regs[] =
 {
         mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
-        mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
+        mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
         mmCOMPUTE_NUM_THREAD_X, 256*5,
         mmCOMPUTE_NUM_THREAD_Y, 1,
         mmCOMPUTE_NUM_THREAD_Z, 1,
+        mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
         mmCOMPUTE_PGM_RSRC2, 20,
         mmCOMPUTE_USER_DATA_0, 0xedcedc00,
         mmCOMPUTE_USER_DATA_1, 0xedcedc01,

@@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] =
         mmCOMPUTE_NUM_THREAD_X, 256*5,
         mmCOMPUTE_NUM_THREAD_Y, 1,
         mmCOMPUTE_NUM_THREAD_Z, 1,
+        mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
         mmCOMPUTE_PGM_RSRC2, 20,
         mmCOMPUTE_USER_DATA_0, 0xedcedc00,
         mmCOMPUTE_USER_DATA_1, 0xedcedc01,
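For reference, the PGM_RSRC1 comments above can be reproduced by decoding the two values. The field layout assumed below (VGPRS in bits 5:0 with a granularity of four registers, SGPRS in bits 9:6 with a granularity of eight, BULKY in bit 24) is the usual GCN COMPUTE_PGM_RSRC1 encoding; it is an assumption stated here, not something taken from the patch, and the helper is a standalone userspace sketch.

#include <stdio.h>

static void decode_pgm_rsrc1(unsigned int v)
{
        unsigned int vgprs = v & 0x3f;        /* granularity of 4 VGPRs (assumed layout) */
        unsigned int sgprs = (v >> 6) & 0xf;  /* granularity of 8 SGPRs (assumed layout) */
        unsigned int bulky = (v >> 24) & 0x1;

        printf("0x%07x: VGPRS=%u (%u logical), SGPRS=%u (%u), BULKY=%u\n",
               v, vgprs, (vgprs + 1) * 4, sgprs, (sgprs + 1) * 8, bulky);
}

int main(void)
{
        decode_pgm_rsrc1(0x100004f); /* VGPR-clearing shader: 64 VGPRs, 16 SGPRs */
        decode_pgm_rsrc1(0x240);     /* SGPR-clearing shaders: 80 SGPRs */
        return 0;
}

Running it prints values that match the comments in the register tables above.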
@@ -6,5 +6,6 @@ config HSA_AMD
         tristate "HSA kernel driver for AMD GPU devices"
         depends on DRM_AMDGPU && X86_64
         imply AMD_IOMMU_V2
+        select MMU_NOTIFIER
         help
           Enable this if you want to use HSA features on AMD GPU devices.
@@ -749,12 +749,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
         struct timespec64 time;
 
         dev = kfd_device_by_id(args->gpu_id);
-        if (dev == NULL)
-                return -EINVAL;
-
-        /* Reading GPU clock counter from KGD */
-        args->gpu_clock_counter =
-                dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
+        if (dev)
+                /* Reading GPU clock counter from KGD */
+                args->gpu_clock_counter =
+                        dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
+        else
+                /* Node without GPU resource */
+                args->gpu_clock_counter = 0;
 
         /* No access to rdtsc. Using raw monotonic time */
         getrawmonotonic64(&time);

@@ -1147,7 +1148,7 @@ err_unlock:
         return ret;
 }
 
-bool kfd_dev_is_large_bar(struct kfd_dev *dev)
+static bool kfd_dev_is_large_bar(struct kfd_dev *dev)
 {
         struct kfd_local_mem_info mem_info;
 

@@ -1421,7 +1422,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 
         pdd = kfd_get_process_device_data(dev, p);
         if (!pdd) {
-                err = PTR_ERR(pdd);
+                err = -EINVAL;
                 goto bind_process_to_device_failed;
         }
 
@@ -4557,6 +4557,7 @@ static int dm_update_crtcs_state(struct dc *dc,
                 struct amdgpu_dm_connector *aconnector = NULL;
                 struct drm_connector_state *new_con_state = NULL;
                 struct dm_connector_state *dm_conn_state = NULL;
+                struct drm_plane_state *new_plane_state = NULL;
 
                 new_stream = NULL;
 

@@ -4564,6 +4565,13 @@ static int dm_update_crtcs_state(struct dc *dc,
                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                 acrtc = to_amdgpu_crtc(crtc);
 
+                new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
+
+                if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
+                        ret = -EINVAL;
+                        goto fail;
+                }
+
                 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
 
                 /* TODO This hack should go away */

@@ -4760,7 +4768,7 @@ static int dm_update_planes_state(struct dc *dc,
                         if (!dm_old_crtc_state->stream)
                                 continue;
 
-                        DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
+                        DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
                                         plane->base.id, old_plane_crtc->base.id);
 
                         if (!dc_remove_plane_from_context(
@@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
 {
         int src;
         struct irq_list_head *lh;
+        unsigned long irq_table_flags;
         DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
-
         for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
-
+                DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
                 /* The handler was removed from the table,
                  * it means it is safe to flush all the 'work'
                  * (because no code can schedule a new one). */
                 lh = &adev->dm.irq_handler_list_low_tab[src];
+                DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
                 flush_work(&lh->work);
         }
 }
@@ -161,6 +161,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
         struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
 
+        if (amdgpu_dm_connector->edid) {
+                kfree(amdgpu_dm_connector->edid);
+                amdgpu_dm_connector->edid = NULL;
+        }
+
         drm_encoder_cleanup(&amdgpu_encoder->base);
         kfree(amdgpu_encoder);
         drm_connector_cleanup(connector);

@@ -181,28 +186,22 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 {
         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-        struct edid *edid;
         struct dc_sink *dc_sink;
         struct dc_sink_init_data init_params = {
                         .link = aconnector->dc_link,
                         .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 
+        /* FIXME none of this is safe. we shouldn't touch aconnector here in
+         * atomic_check
+         */
+
         /*
          * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
          */
         if (!aconnector->port || !aconnector->port->aux.ddc.algo)
                 return;
 
-        edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
-
-        if (!edid) {
-                drm_mode_connector_update_edid_property(
-                        &aconnector->base,
-                        NULL);
-                return;
-        }
-
-        aconnector->edid = edid;
+        ASSERT(aconnector->edid);
 
         dc_sink = dc_link_add_remote_sink(
                 aconnector->dc_link,

@@ -215,9 +214,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 
         amdgpu_dm_add_sink_to_freesync_module(
                         connector, aconnector->edid);
-
-        drm_mode_connector_update_edid_property(
-                        &aconnector->base, aconnector->edid);
 }
 
 static int dm_dp_mst_get_modes(struct drm_connector *connector)

@@ -230,10 +226,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 
         if (!aconnector->edid) {
                 struct edid *edid;
-                struct dc_sink *dc_sink;
-                struct dc_sink_init_data init_params = {
-                                .link = aconnector->dc_link,
-                                .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
                 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
 
                 if (!edid) {

@@ -244,11 +236,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
                 }
 
                 aconnector->edid = edid;
+        }
 
+        if (!aconnector->dc_sink) {
+                struct dc_sink *dc_sink;
+                struct dc_sink_init_data init_params = {
+                                .link = aconnector->dc_link,
+                                .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
                 dc_sink = dc_link_add_remote_sink(
                         aconnector->dc_link,
-                        (uint8_t *)edid,
-                        (edid->extensions + 1) * EDID_LENGTH,
+                        (uint8_t *)aconnector->edid,
+                        (aconnector->edid->extensions + 1) * EDID_LENGTH,
                         &init_params);
 
                 dc_sink->priv = aconnector;

@@ -256,12 +254,12 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 
                 if (aconnector->dc_sink)
                         amdgpu_dm_add_sink_to_freesync_module(
-                                                connector, edid);
-
-                drm_mode_connector_update_edid_property(
-                        &aconnector->base, edid);
+                                                connector, aconnector->edid);
         }
 
+        drm_mode_connector_update_edid_property(
+                        &aconnector->base, aconnector->edid);
+
         ret = drm_add_edid_modes(connector, aconnector->edid);
 
         return ret;

@@ -424,14 +422,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                 dc_sink_release(aconnector->dc_sink);
                 aconnector->dc_sink = NULL;
         }
-        if (aconnector->edid) {
-                kfree(aconnector->edid);
-                aconnector->edid = NULL;
-        }
-
-        drm_mode_connector_update_edid_property(
-                &aconnector->base,
-                NULL);
 
         aconnector->mst_connected = false;
 }
@@ -4451,6 +4451,7 @@ drm_reset_display_info(struct drm_connector *connector)
         info->max_tmds_clock = 0;
         info->dvi_dual = false;
         info->has_hdmi_infoframe = false;
+        memset(&info->hdmi, 0, sizeof(info->hdmi));
 
         info->non_desktop = 0;
 }

@@ -4462,17 +4463,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
 
         u32 quirks = edid_get_quirks(edid);
 
+        drm_reset_display_info(connector);
+
         info->width_mm = edid->width_cm * 10;
         info->height_mm = edid->height_cm * 10;
 
-        /* driver figures it out in this case */
-        info->bpc = 0;
-        info->color_formats = 0;
-        info->cea_rev = 0;
-        info->max_tmds_clock = 0;
-        info->dvi_dual = false;
-        info->has_hdmi_infoframe = false;
-
         info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
 
         DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
@@ -2140,10 +2140,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
                 }
         }
 
-        /* According to BSpec, "The CD clock frequency must be at least twice
+        /*
+         * According to BSpec, "The CD clock frequency must be at least twice
          * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+         *
+         * FIXME: Check the actual, not default, BCLK being used.
+         *
+         * FIXME: This does not depend on ->has_audio because the higher CDCLK
+         * is required for audio probe, also when there are no audio capable
+         * displays connected at probe time. This leads to unnecessarily high
+         * CDCLK when audio is not required.
+         *
+         * FIXME: This limit is only applied when there are displays connected
+         * at probe time. If we probe without displays, we'll still end up using
+         * the platform minimum CDCLK, failing audio probe.
          */
-        if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+        if (INTEL_GEN(dev_priv) >= 9)
                 min_cdclk = max(2 * 96000, min_cdclk);
 
         /*
@@ -49,12 +49,12 @@
  * check the condition before the timeout.
  */
 #define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
-        unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
+        const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
         long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
         int ret__; \
         might_sleep(); \
         for (;;) { \
-                bool expired__ = time_after(jiffies, timeout__); \
+                const bool expired__ = ktime_after(ktime_get_raw(), end__); \
                 OP; \
                 if (COND) { \
                         ret__ = 0; \
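The __wait_for() change above replaces a jiffies-based timeout with a deadline taken from ktime_get_raw(), so expiry no longer depends on the scheduler tick, and the deadline is still sampled before the condition so a condition that becomes true right at the deadline is reported as success. A rough userspace sketch of the same loop, using clock_gettime(CLOCK_MONOTONIC_RAW) in place of ktime_get_raw(); all names below are illustrative and not the kernel macro:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
        return (int64_t)ts.tv_sec * 1000000000ll + ts.tv_nsec;
}

/* Poll cond() until it returns true or timeout_us elapses; 0 on success,
 * -1 on timeout.  The deadline is checked once more after it is sampled,
 * mirroring the ordering in the patched macro. */
static int wait_for(bool (*cond)(void), int64_t timeout_us, unsigned int sleep_us)
{
        const int64_t end = now_ns() + timeout_us * 1000ll;

        for (;;) {
                const bool expired = now_ns() > end;

                if (cond())
                        return 0;
                if (expired)
                        return -1;
                usleep(sleep_us);
        }
}

static bool never_ready(void)
{
        return false;
}

int main(void)
{
        printf("ret = %d\n", wait_for(never_ready, 20000, 1000));
        return 0;
}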
@@ -806,7 +806,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
                 return;
 
         intel_fbdev_sync(ifbdev);
-        if (ifbdev->vma)
+        if (ifbdev->vma || ifbdev->helper.deferred_setup)
                 drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
@@ -641,19 +641,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
 
         DRM_DEBUG_KMS("Enabling DC6\n");
 
-        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+        /* Wa Display #1183: skl,kbl,cfl */
+        if (IS_GEN9_BC(dev_priv))
+                I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+                           SKL_SELECT_ALTERNATE_DC_EXIT);
 
+        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 }
 
 void skl_disable_dc6(struct drm_i915_private *dev_priv)
 {
         DRM_DEBUG_KMS("Disabling DC6\n");
 
-        /* Wa Display #1183: skl,kbl,cfl */
-        if (IS_GEN9_BC(dev_priv))
-                I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
-                           SKL_SELECT_ALTERNATE_DC_EXIT);
-
         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 }
 
@@ -351,6 +351,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
 
         spin_lock_irqsave(&dev->event_lock, flags);
         mdp4_crtc->event = crtc->state->event;
+        crtc->state->event = NULL;
         spin_unlock_irqrestore(&dev->event_lock, flags);
 
         blend_setup(crtc);
@@ -708,6 +708,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
 
         spin_lock_irqsave(&dev->event_lock, flags);
         mdp5_crtc->event = crtc->state->event;
+        crtc->state->event = NULL;
         spin_unlock_irqrestore(&dev->event_lock, flags);
 
         /*
@@ -171,7 +171,8 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
         return i;
 }
 
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
+                uint64_t modifier)
 {
         int i;
         for (i = 0; i < ARRAY_SIZE(formats); i++) {
@@ -98,7 +98,7 @@ struct mdp_format {
 #define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
 
 uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
 
 /* MDP capabilities */
 #define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
@@ -173,6 +173,7 @@ struct msm_dsi_host {
 
         bool registered;
         bool power_on;
+        bool enabled;
         int irq;
 };
 

@@ -775,7 +776,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
         switch (mipi_fmt) {
         case MIPI_DSI_FMT_RGB888:       return CMD_DST_FORMAT_RGB888;
         case MIPI_DSI_FMT_RGB666_PACKED:
-        case MIPI_DSI_FMT_RGB666:       return VID_DST_FORMAT_RGB666;
+        case MIPI_DSI_FMT_RGB666:       return CMD_DST_FORMAT_RGB666;
         case MIPI_DSI_FMT_RGB565:       return CMD_DST_FORMAT_RGB565;
         default:                        return CMD_DST_FORMAT_RGB888;
         }

@@ -986,13 +987,19 @@ static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
 
 static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
 {
+        u32 ret = 0;
+        struct device *dev = &msm_host->pdev->dev;
+
         dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
 
         reinit_completion(&msm_host->video_comp);
 
-        wait_for_completion_timeout(&msm_host->video_comp,
+        ret = wait_for_completion_timeout(&msm_host->video_comp,
                         msecs_to_jiffies(70));
 
+        if (ret <= 0)
+                dev_err(dev, "wait for video done timed out\n");
+
         dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
 }
 

@@ -1001,7 +1008,7 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
         if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
                 return;
 
-        if (msm_host->power_on) {
+        if (msm_host->power_on && msm_host->enabled) {
                 dsi_wait4video_done(msm_host);
                 /* delay 4 ms to skip BLLP */
                 usleep_range(2000, 4000);

@@ -2203,7 +2210,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
          *      pm_runtime_put_autosuspend(&msm_host->pdev->dev);
          * }
          */
-
+        msm_host->enabled = true;
         return 0;
 }
 

@@ -2211,6 +2218,7 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
 {
         struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 
+        msm_host->enabled = false;
         dsi_op_mode_config(msm_host,
                 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
 
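The dsi_wait4video_done() hunk above now keeps the return value of wait_for_completion_timeout() and logs when the 70 ms wait expires instead of discarding the result. A loose userspace analogue of that pattern using sem_timedwait(); this is illustrative only, the kernel completion API works differently and the names are invented:

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static sem_t video_comp;        /* stands in for msm_host->video_comp */

/* Wait up to timeout_ms for the "video done" event and report a timeout,
 * instead of throwing the result of the wait away. */
static int wait4video_done(int timeout_ms)
{
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_ms / 1000;
        ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (ts.tv_nsec >= 1000000000L) {
                ts.tv_sec++;
                ts.tv_nsec -= 1000000000L;
        }

        if (sem_timedwait(&video_comp, &ts)) {
                if (errno == ETIMEDOUT)
                        fprintf(stderr, "wait for video done timed out\n");
                return -1;
        }
        return 0;
}

int main(void)
{
        sem_init(&video_comp, 0, 0);    /* nobody posts: force the timeout path */
        return wait4video_done(70) ? 1 : 0;
}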
@@ -265,6 +265,115 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
         return 0;
 }
 
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+        struct msm_dsi_phy_clk_request *clk_req)
+{
+        const unsigned long bit_rate = clk_req->bitclk_rate;
+        const unsigned long esc_rate = clk_req->escclk_rate;
+        s32 ui, ui_x8, lpx;
+        s32 tmax, tmin;
+        s32 pcnt0 = 50;
+        s32 pcnt1 = 50;
+        s32 pcnt2 = 10;
+        s32 pcnt3 = 30;
+        s32 pcnt4 = 10;
+        s32 pcnt5 = 2;
+        s32 coeff = 1000; /* Precision, should avoid overflow */
+        s32 hb_en, hb_en_ckln;
+        s32 temp;
+
+        if (!bit_rate || !esc_rate)
+                return -EINVAL;
+
+        timing->hs_halfbyte_en = 0;
+        hb_en = 0;
+        timing->hs_halfbyte_en_ckln = 0;
+        hb_en_ckln = 0;
+
+        ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+        ui_x8 = ui << 3;
+        lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+        temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+        tmin = max_t(s32, temp, 0);
+        temp = (95 * coeff) / ui_x8;
+        tmax = max_t(s32, temp, 0);
+        timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+        temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+        tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+        tmax = (tmin > 255) ? 511 : 255;
+        timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+        tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+        temp = 105 * coeff + 12 * ui - 20 * coeff;
+        tmax = (temp + 3 * ui) / ui_x8;
+        timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+        temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+        tmin = max_t(s32, temp, 0);
+        temp = (85 * coeff + 6 * ui) / ui_x8;
+        tmax = max_t(s32, temp, 0);
+        timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+        temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+        tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+        tmax = 255;
+        timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+        tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+        temp = 105 * coeff + 12 * ui - 20 * coeff;
+        tmax = (temp / ui_x8) - 1;
+        timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+        temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+        timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+        tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+        tmax = 255;
+        timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+        temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+        timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+        temp = 60 * coeff + 52 * ui - 43 * ui;
+        tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+        tmax = 63;
+        timing->shared_timings.clk_post =
+                linear_inter(tmax, tmin, pcnt2, 0, false);
+
+        temp = 8 * ui + (timing->clk_prepare << 3) * ui;
+        temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
+        temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+                (((timing->hs_rqst_ckln << 3) + 8) * ui);
+        tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+        tmax = 63;
+        if (tmin > tmax) {
+                temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+                timing->shared_timings.clk_pre = temp >> 1;
+                timing->shared_timings.clk_pre_inc_by_2 = 1;
+        } else {
+                timing->shared_timings.clk_pre =
+                        linear_inter(tmax, tmin, pcnt2, 0, false);
+                timing->shared_timings.clk_pre_inc_by_2 = 0;
+        }
+
+        timing->ta_go = 3;
+        timing->ta_sure = 0;
+        timing->ta_get = 4;
+
+        DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+                timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+                timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+                timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+                timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+                timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+                timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+                timing->hs_prep_dly_ckln);
+
+        return 0;
+}
+
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
                 u32 bit_mask)
 {
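A worked example of the fixed-point arithmetic used by msm_dsi_dphy_timing_calc_v3() above, runnable in userspace. It assumes that linear_inter() returns the point pcnt percent of the way from tmin to tmax and that NSEC_PER_MSEC is 1000000; the bit rate chosen below is hypothetical and only meant to show how clk_prepare falls out of the 38..95 ns spec window.

#include <stdio.h>

#define NSEC_PER_MSEC 1000000L
#define S_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Assumed behaviour of the kernel helper: pick the point pcnt percent of
 * the way from tmin to tmax. */
static long linear_inter(long tmax, long tmin, long pcnt)
{
        return tmin + (tmax - tmin) * pcnt / 100;
}

int main(void)
{
        const long bit_rate = 900000000;  /* hypothetical 900 MHz DSI bit clock */
        const long coeff = 1000;          /* precision factor, as in the patch */
        long ui, ui_x8, tmin, tmax, clk_prepare;

        /* One unit interval in ns, scaled by coeff to keep precision. */
        ui = NSEC_PER_MSEC * coeff / (bit_rate / 1000);
        ui_x8 = ui << 3;

        /* clk_prepare: the 38..95 ns spec window converted to byte-clock
         * cycles, then the 50% point is chosen (pcnt0 in the patch). */
        tmin = S_DIV_ROUND_UP(38 * coeff, ui_x8);
        tmax = (95 * coeff) / ui_x8;
        clk_prepare = linear_inter(tmax, tmin, 50);

        printf("ui = %ld (ns * %ld), clk_prepare = %ld\n", ui, coeff, clk_prepare);
        return 0;
}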
@@ -101,6 +101,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
                 struct msm_dsi_phy_clk_request *clk_req);
 int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
                 struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+                struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
                 u32 bit_mask);
 int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
@@ -79,34 +79,6 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
         dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
 }
 
-static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
-        struct msm_dsi_phy_clk_request *clk_req)
-{
-        /*
-         * TODO: These params need to be computed, they're currently hardcoded
-         * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a
-         * default escape clock of 19.2 Mhz.
-         */
-
-        timing->hs_halfbyte_en = 0;
-        timing->clk_zero = 0x1c;
-        timing->clk_prepare = 0x07;
-        timing->clk_trail = 0x07;
-        timing->hs_exit = 0x23;
-        timing->hs_zero = 0x21;
-        timing->hs_prepare = 0x07;
-        timing->hs_trail = 0x07;
-        timing->hs_rqst = 0x05;
-        timing->ta_sure = 0x00;
-        timing->ta_go = 0x03;
-        timing->ta_get = 0x04;
-
-        timing->shared_timings.clk_pre = 0x2d;
-        timing->shared_timings.clk_post = 0x0d;
-
-        return 0;
-}
-
 static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
                 struct msm_dsi_phy_clk_request *clk_req)
 {
@@ -183,7 +183,8 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
         hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
         vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
 
-        format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
+        format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
+                        mode_cmd->modifier[0]);
         if (!format) {
                 dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
                                 (char *)&mode_cmd->pixel_format);
@@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
         if (IS_ERR(fb)) {
                 dev_err(dev->dev, "failed to allocate fb\n");
-                ret = PTR_ERR(fb);
-                goto fail;
+                return PTR_ERR(fb);
         }
 
         bo = msm_framebuffer_bo(fb, 0);

@@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
 fail_unlock:
         mutex_unlock(&dev->struct_mutex);
-fail:
-
-        if (ret) {
-                if (fb)
-                        drm_framebuffer_remove(fb);
-        }
-
+        drm_framebuffer_remove(fb);
         return ret;
 }
 
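The msm_fbdev_create() fix above returns PTR_ERR(fb) immediately instead of jumping to a cleanup path that could hand the error pointer to drm_framebuffer_remove(). A minimal sketch of the ERR_PTR convention this relies on, with the kernel macros re-declared locally for illustration (in the kernel they live in <linux/err.h>):

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct fb { int id; };

static struct fb *alloc_fb(int fail)
{
        static struct fb good = { .id = 1 };

        return fail ? ERR_PTR(-12) : &good;     /* -12 is -ENOMEM */
}

static void remove_fb(struct fb *fb)
{
        /* Dereferencing fb here would crash if fb were an ERR_PTR value,
         * which is exactly what the old error path risked. */
        printf("removing fb %d\n", fb->id);
}

static int create(int fail)
{
        struct fb *fb = alloc_fb(fail);

        if (IS_ERR(fb))
                return PTR_ERR(fb);     /* bail out early, never touch fb again */

        remove_fb(fb);
        return 0;
}

int main(void)
{
        printf("ok path: %d\n", create(0));
        printf("error path: %d\n", create(1));
        return 0;
}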
@@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj)
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
         if (msm_obj->pages) {
-                /* For non-cached buffers, ensure the new pages are clean
-                 * because display controller, GPU, etc. are not coherent:
-                 */
-                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-                        dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
-                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+                if (msm_obj->sgt) {
+                        /* For non-cached buffers, ensure the new
+                         * pages are clean because display controller,
+                         * GPU, etc. are not coherent:
+                         */
+                        if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+                                dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+                                             msm_obj->sgt->nents,
+                                             DMA_BIDIRECTIONAL);
 
-                if (msm_obj->sgt)
                         sg_free_table(msm_obj->sgt);
-
-                kfree(msm_obj->sgt);
+                        kfree(msm_obj->sgt);
+                }
 
                 if (use_pages(obj))
                         drm_gem_put_pages(obj, msm_obj->pages, true, false);
@@ -48,8 +48,11 @@ struct msm_kms_funcs {
         /* functions to wait for atomic commit completed on each CRTC */
         void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
                                         struct drm_crtc *crtc);
+        /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
+        const struct msm_format *(*get_format)(struct msm_kms *kms,
+                                        const uint32_t format,
+                                        const uint64_t modifiers);
         /* misc: */
-        const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
         long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
                         struct drm_encoder *encoder);
         int (*set_split_display)(struct msm_kms *kms,
@@ -179,10 +179,9 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
                       uint32_t type, bool interruptible)
 {
         struct qxl_command cmd;
-        struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
 
         cmd.type = type;
-        cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
+        cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
 
         return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
 }

@@ -192,10 +191,9 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
                       uint32_t type, bool interruptible)
 {
         struct qxl_command cmd;
-        struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
 
         cmd.type = type;
-        cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
+        cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
 
         return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
 }
@@ -167,6 +167,7 @@ struct qxl_release {
 
         int id;
         int type;
+        struct qxl_bo *release_bo;
         uint32_t release_offset;
         uint32_t surface_release_id;
         struct ww_acquire_ctx ticket;
@@ -182,9 +182,9 @@ static int qxl_process_single_command(struct qxl_device *qdev,
                 goto out_free_reloc;
 
         /* TODO copy slow path code from i915 */
-        fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+        fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
         unwritten = __copy_from_user_inatomic_nocache
-                (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE),
+                (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
                 u64_to_user_ptr(cmd->command), cmd->command_size);
 
         {
@@ -173,6 +173,7 @@ qxl_release_free_list(struct qxl_release *release)
                 list_del(&entry->tv.head);
                 kfree(entry);
         }
+        release->release_bo = NULL;
 }
 
 void

@@ -296,7 +297,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
 {
         if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
                 int idr_ret;
-                struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
                 struct qxl_bo *bo;
                 union qxl_release_info *info;
 

@@ -304,8 +304,9 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
                 if (idr_ret < 0)
                         return idr_ret;
-                bo = to_qxl_bo(entry->tv.bo);
+                bo = create_rel->release_bo;
 
+                (*release)->release_bo = bo;
                 (*release)->release_offset = create_rel->release_offset + 64;
 
                 qxl_release_list_add(*release, bo);

@@ -365,6 +366,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 
         bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
 
+        (*release)->release_bo = bo;
         (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
         qdev->current_release_bo_offset[cur_idx]++;
 

@@ -408,13 +410,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
 {
         void *ptr;
         union qxl_release_info *info;
-        struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
-        struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+        struct qxl_bo *bo = release->release_bo;
 
-        ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+        ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
         if (!ptr)
                 return NULL;
-        info = ptr + (release->release_offset & ~PAGE_SIZE);
+        info = ptr + (release->release_offset & ~PAGE_MASK);
         return info;
 }
 

@@ -422,11 +423,10 @@ void qxl_release_unmap(struct qxl_device *qdev,
                         struct qxl_release *release,
                         union qxl_release_info *info)
 {
-        struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
-        struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+        struct qxl_bo *bo = release->release_bo;
         void *ptr;
 
-        ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
+        ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
         qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
 }
 
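Both qxl hunks that touch release_offset swap "& PAGE_SIZE" / "& ~PAGE_SIZE" for "& PAGE_MASK" / "& ~PAGE_MASK" when splitting the offset into a page-aligned base and an in-page remainder. A self-contained sketch of why the original masks were wrong, with PAGE_SIZE and PAGE_MASK defined locally for the example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull
#define PAGE_MASK (~(PAGE_SIZE - 1))    /* ...fffff000: keeps the page-aligned part */

int main(void)
{
        uint64_t offset = 0x3028;       /* lives in the 4th page, 0x28 bytes in */

        /* Wrong: PAGE_SIZE is a single bit (0x1000), so this only keeps or
         * clears bit 12 and silently corrupts the page number. */
        printf("offset & PAGE_SIZE   = 0x%llx\n",
               (unsigned long long)(offset & PAGE_SIZE));       /* 0x1000 */
        printf("offset & ~PAGE_SIZE  = 0x%llx\n",
               (unsigned long long)(offset & ~PAGE_SIZE));      /* 0x2028 */

        /* Right: PAGE_MASK clears the low 12 bits, ~PAGE_MASK keeps them. */
        printf("offset & PAGE_MASK   = 0x%llx\n",
               (unsigned long long)(offset & PAGE_MASK));       /* 0x3000 */
        printf("offset & ~PAGE_MASK  = 0x%llx\n",
               (unsigned long long)(offset & ~PAGE_MASK));      /* 0x28  */
        return 0;
}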
@@ -94,64 +94,9 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
         }
 }
 
-static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *crtc,
-                                                           const struct drm_display_mode *mode)
-{
-        struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(crtc);
-        struct sun4i_tcon *tcon = lvds->tcon;
-        u32 hsync = mode->hsync_end - mode->hsync_start;
-        u32 vsync = mode->vsync_end - mode->vsync_start;
-        unsigned long rate = mode->clock * 1000;
-        long rounded_rate;
-
-        DRM_DEBUG_DRIVER("Validating modes...\n");
-
-        if (hsync < 1)
-                return MODE_HSYNC_NARROW;
-
-        if (hsync > 0x3ff)
-                return MODE_HSYNC_WIDE;
-
-        if ((mode->hdisplay < 1) || (mode->htotal < 1))
-                return MODE_H_ILLEGAL;
-
-        if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
-                return MODE_BAD_HVALUE;
-
-        DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
-
-        if (vsync < 1)
-                return MODE_VSYNC_NARROW;
-
-        if (vsync > 0x3ff)
-                return MODE_VSYNC_WIDE;
-
-        if ((mode->vdisplay < 1) || (mode->vtotal < 1))
-                return MODE_V_ILLEGAL;
-
-        if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
-                return MODE_BAD_VVALUE;
-
-        DRM_DEBUG_DRIVER("Vertical parameters OK\n");
-
-        tcon->dclk_min_div = 7;
-        tcon->dclk_max_div = 7;
-        rounded_rate = clk_round_rate(tcon->dclk, rate);
-        if (rounded_rate < rate)
-                return MODE_CLOCK_LOW;
-
-        if (rounded_rate > rate)
-                return MODE_CLOCK_HIGH;
-
-        DRM_DEBUG_DRIVER("Clock rate OK\n");
-
-        return MODE_OK;
-}
-
 static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
         .disable        = sun4i_lvds_encoder_disable,
         .enable         = sun4i_lvds_encoder_enable,
-        .mode_valid     = sun4i_lvds_encoder_mode_valid,
 };
 
 static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
@@ -293,7 +293,7 @@ retry:
         ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
         if (ret == -ENOSPC) {
                 spin_unlock(&vgdev->ctrlq.qlock);
-                wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
                 spin_lock(&vgdev->ctrlq.qlock);
                 goto retry;
         } else {

@@ -368,7 +368,7 @@ retry:
         ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
         if (ret == -ENOSPC) {
                 spin_unlock(&vgdev->cursorq.qlock);
-                wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                 spin_lock(&vgdev->cursorq.qlock);
                 goto retry;
         } else {
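The virtio-gpu hunks above change the wait_event() predicate from "any free descriptor" to "at least as many free descriptors as this command needs", so a woken producer cannot immediately fail with -ENOSPC again. A small pthread sketch of the same idea; the ring, slot counts, and helper names are invented for illustration:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int free_slots;                  /* descriptors available in the ring */

/* Reserve 'needed' slots, sleeping until they are all available at once.
 * Waiting for "free_slots != 0" instead would wake us with too few slots,
 * and the subsequent add would fail again, mirroring the bug being fixed. */
static void reserve_slots(int needed)
{
        pthread_mutex_lock(&lock);
        while (free_slots < needed)     /* predicate mirrors num_free >= outcnt + incnt */
                pthread_cond_wait(&cond, &lock);
        free_slots -= needed;
        pthread_mutex_unlock(&lock);
}

/* Completion side: return slots and wake the waiter. */
static void release_slots(int count)
{
        pthread_mutex_lock(&lock);
        free_slots += count;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
}

static void *completer(void *arg)
{
        (void)arg;
        for (int i = 0; i < 3; i++) {
                usleep(10000);
                release_slots(1);       /* slots come back one at a time */
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, completer, NULL);
        reserve_slots(3);               /* only proceeds once all 3 are free */
        printf("got 3 slots\n");
        pthread_join(t, NULL);
        return 0;
}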