Merge branch 'drm-fixes-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
Nothing too major here. A couple more ttm fixes for huge page and a kiq fix for amdgpu, along with some DC fixes.

* 'drm-fixes-4.15' of git://people.freedesktop.org/~agd5f/linux:
  drm/amd/display: Fix rehook MST display not light back on
  drm/amd/display: fix missing pixel clock adjustment for dongle
  drm/amd/display: set chroma taps to 1 when not scaling
  drm/amd/display: add pipe locking before front end programing
  drm/amdgpu: fix MAP_QUEUES paramter
  drm/ttm: max_cpages is in unit of native page
  drm/ttm: fix incorrect calculate on shrink_pages
commit fa5cf90160
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c

@@ -2467,7 +2467,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
 			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
 			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
-			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
+			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
 			  PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
 			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
 		amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -2336,7 +2336,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		       const struct dm_connector_state *dm_state)
 {
 	struct drm_display_mode *preferred_mode = NULL;
-	const struct drm_connector *drm_connector;
+	struct drm_connector *drm_connector;
 	struct dc_stream_state *stream = NULL;
 	struct drm_display_mode mode = *drm_mode;
 	bool native_mode_found = false;
@@ -2355,11 +2355,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
 	if (!aconnector->dc_sink) {
 		/*
-		 * Exclude MST from creating fake_sink
-		 * TODO: need to enable MST into fake_sink feature
+		 * Create dc_sink when necessary to MST
+		 * Don't apply fake_sink to MST
 		 */
-		if (aconnector->mst_port)
-			goto stream_create_fail;
+		if (aconnector->mst_port) {
+			dm_dp_mst_dc_sink_create(drm_connector);
+			goto mst_dc_sink_create_done;
+		}
 
 		if (create_fake_sink(aconnector))
 			goto stream_create_fail;
@@ -2410,6 +2412,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 stream_create_fail:
 dm_state_null:
 drm_connector_null:
+mst_dc_sink_create_done:
 	return stream;
 }
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h

@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
 	struct mutex hpd_lock;
 
 	bool fake_enable;
+
+	bool mst_connected;
 };
 
 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c

@@ -185,6 +185,42 @@ static int dm_connector_update_modes(struct drm_connector *connector,
 	return ret;
 }
 
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct edid *edid;
+	struct dc_sink *dc_sink;
+	struct dc_sink_init_data init_params = {
+			.link = aconnector->dc_link,
+			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+	if (!edid) {
+		drm_mode_connector_update_edid_property(
+			&aconnector->base,
+			NULL);
+		return;
+	}
+
+	aconnector->edid = edid;
+
+	dc_sink = dc_link_add_remote_sink(
+		aconnector->dc_link,
+		(uint8_t *)aconnector->edid,
+		(aconnector->edid->extensions + 1) * EDID_LENGTH,
+		&init_params);
+
+	dc_sink->priv = aconnector;
+	aconnector->dc_sink = dc_sink;
+
+	amdgpu_dm_add_sink_to_freesync_module(
+			connector, aconnector->edid);
+
+	drm_mode_connector_update_edid_property(
+			&aconnector->base, aconnector->edid);
+}
+
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -311,6 +347,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 			drm_mode_connector_set_path_property(connector, pathprop);
 
 			drm_connector_list_iter_end(&conn_iter);
+			aconnector->mst_connected = true;
 			return &aconnector->base;
 		}
 	}
@@ -363,6 +400,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	 */
 	amdgpu_dm_connector_funcs_reset(connector);
 
+	aconnector->mst_connected = true;
+
 	DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
 			aconnector, connector->base.id, aconnector->mst_port);
 
@@ -394,6 +433,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	drm_mode_connector_update_edid_property(
 			&aconnector->base,
 			NULL);
+
+	aconnector->mst_connected = false;
 }
 
 static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -404,10 +445,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 	drm_kms_helper_hotplug_event(dev);
 }
 
+static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
+{
+	mutex_lock(&connector->dev->mode_config.mutex);
+	drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+	mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
 static void dm_dp_mst_register_connector(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
 	if (adev->mode_info.rfbdev)
 		drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -416,6 +465,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
 
 	drm_connector_register(connector);
 
+	if (aconnector->mst_connected)
+		dm_dp_mst_link_status_reset(connector);
 }
 
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h

@@ -31,5 +31,6 @@ struct amdgpu_dm_connector;
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 				       struct amdgpu_dm_connector *aconnector);
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
 
 #endif
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c

@@ -900,6 +900,15 @@ bool dcn_validate_bandwidth(
 			v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
 			v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
 			v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+			/*
+			 * Spreadsheet doesn't handle taps_c is one properly,
+			 * need to force Chroma to always be scaled to pass
+			 * bandwidth validation.
+			 */
+			if (v->override_hta_pschroma[input_idx] == 1)
+				v->override_hta_pschroma[input_idx] = 2;
+			if (v->override_vta_pschroma[input_idx] == 1)
+				v->override_vta_pschroma[input_idx] = 2;
 			v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
 		}
 		if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
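The override added here is just a clamp for the validation pass: the hardware may use a single chroma tap when chroma is not scaled, but the bandwidth spreadsheet model misbehaves when taps_c is 1, so validation pretends chroma is scaled with 2 taps. A standalone sketch of that rule (the helper name is made up for illustration, not DC code):

#include <stdio.h>

/* Illustrative only: mirrors the override in dcn_validate_bandwidth(),
 * where a chroma tap count of 1 is treated as 2 for validation. */
static unsigned int chroma_taps_for_validation(unsigned int taps_c)
{
	return taps_c == 1 ? 2 : taps_c;
}

int main(void)
{
	unsigned int taps;

	for (taps = 1; taps <= 4; taps++)
		printf("hw taps_c = %u -> validated as %u\n",
		       taps, chroma_taps_for_validation(taps));
	return 0;
}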
drivers/gpu/drm/amd/display/dc/core/dc_link.c

@@ -1801,7 +1801,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
 		link->link_enc->funcs->disable_output(link->link_enc, signal, link);
 }
 
-bool dp_active_dongle_validate_timing(
+static bool dp_active_dongle_validate_timing(
 		const struct dc_crtc_timing *timing,
 		const struct dc_dongle_caps *dongle_caps)
 {
@@ -1833,6 +1833,8 @@ bool dp_active_dongle_validate_timing(
 	/* Check Color Depth and Pixel Clock */
 	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
 		required_pix_clk /= 2;
+	else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+		required_pix_clk = required_pix_clk * 2 / 3;
 
 	switch (timing->display_color_depth) {
 	case COLOR_DEPTH_666:
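The added branch scales the pixel clock the active dongle actually has to carry: 4:2:0 halves it and 4:2:2 carries two thirds of the 4:4:4 rate. A standalone sketch of that arithmetic (not driver code; the function name and the 594 MHz 4K60 example are illustrative):

#include <stdio.h>

enum pixel_encoding { ENC_RGB444, ENC_YCBCR422, ENC_YCBCR420 };

/* Mirrors the adjustment in dp_active_dongle_validate_timing(): the clock
 * compared against the dongle cap is reduced for subsampled formats. */
static unsigned int required_pix_clk_khz(unsigned int pix_clk_khz,
					 enum pixel_encoding enc)
{
	if (enc == ENC_YCBCR420)
		return pix_clk_khz / 2;
	if (enc == ENC_YCBCR422)
		return pix_clk_khz * 2 / 3;
	return pix_clk_khz;
}

int main(void)
{
	unsigned int clk = 594000; /* 4K60 reference, in kHz */

	printf("4:4:4 %u kHz, 4:2:2 %u kHz, 4:2:0 %u kHz\n",
	       required_pix_clk_khz(clk, ENC_RGB444),
	       required_pix_clk_khz(clk, ENC_YCBCR422),
	       required_pix_clk_khz(clk, ENC_YCBCR420));
	return 0;
}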
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c

@@ -2866,16 +2866,19 @@ static void dce110_apply_ctx_for_surface(
 		int num_planes,
 		struct dc_state *context)
 {
-	int i, be_idx;
+	int i;
 
 	if (num_planes == 0)
 		return;
 
-	be_idx = -1;
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		if (stream == context->res_ctx.pipe_ctx[i].stream) {
-			be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
-			break;
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (stream == pipe_ctx->stream) {
+			if (!pipe_ctx->top_pipe &&
+				(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
 		}
 	}
 
@@ -2895,9 +2898,22 @@ static void dce110_apply_ctx_for_surface(
 			context->stream_count);
 
 		dce110_program_front_end_for_pipe(dc, pipe_ctx);
+
+		dc->hwss.update_plane_addr(dc, pipe_ctx);
+
 		program_surface_visibility(dc, pipe_ctx);
 
 	}
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if ((stream == pipe_ctx->stream) &&
+			(!pipe_ctx->top_pipe) &&
+			(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+	}
 }
 
 static void dce110_power_down_fe(struct dc *dc, int fe_idx)
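The rework turns the function into a lock / program / unlock sequence over the pipes that belong to the stream: top pipes with a plane (old or new) are locked before any front-end programming and released only after every pipe has been programmed. A minimal standalone sketch of that three-pass shape (dummy types and helper names, not the DC implementation):

#include <stdbool.h>
#include <stdio.h>

#define PIPE_COUNT 4

struct pipe {
	bool owned_by_stream;  /* pipe_ctx->stream == stream in the real code  */
	bool is_top_pipe;      /* !pipe_ctx->top_pipe                          */
	bool has_plane;        /* new or old plane_state present               */
	bool locked;
};

static void pipe_control_lock(struct pipe *p, int idx, bool lock)
{
	p->locked = lock;
	printf("pipe %d %slocked\n", idx, lock ? "" : "un");
}

static void apply_ctx_for_surface(struct pipe pipes[PIPE_COUNT])
{
	int i;

	/* pass 1: lock every affected top pipe before touching the front end */
	for (i = 0; i < PIPE_COUNT; i++)
		if (pipes[i].owned_by_stream && pipes[i].is_top_pipe &&
		    pipes[i].has_plane)
			pipe_control_lock(&pipes[i], i, true);

	/* pass 2: program front ends while the pipes are locked */
	for (i = 0; i < PIPE_COUNT; i++)
		if (pipes[i].owned_by_stream)
			printf("program front end for pipe %d\n", i);

	/* pass 3: release the locks only after all programming is done */
	for (i = 0; i < PIPE_COUNT; i++)
		if (pipes[i].owned_by_stream && pipes[i].is_top_pipe &&
		    pipes[i].has_plane)
			pipe_control_lock(&pipes[i], i, false);
}

int main(void)
{
	struct pipe pipes[PIPE_COUNT] = {
		{ true, true, true, false },
		{ true, false, true, false },
	};

	apply_ctx_for_surface(pipes);
	return 0;
}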
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c

@@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps(
 		scl_data->taps.h_taps = 1;
 	if (IDENTITY_RATIO(scl_data->ratios.vert))
 		scl_data->taps.v_taps = 1;
-	/*
-	 * Spreadsheet doesn't handle taps_c is one properly,
-	 * need to force Chroma to always be scaled to pass
-	 * bandwidth validation.
-	 */
+	if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+		scl_data->taps.h_taps_c = 1;
+	if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+		scl_data->taps.v_taps_c = 1;
 	}
 
 	return true;
drivers/gpu/drm/ttm/ttm_page_alloc.c

@@ -455,6 +455,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		freed += (nr_free_pool - shrink_pages) << pool->order;
 		if (freed >= sc->nr_to_scan)
 			break;
+		shrink_pages <<= pool->order;
 	}
 	mutex_unlock(&lock);
 	return freed;
@@ -543,7 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 	int r = 0;
 	unsigned i, j, cpages;
 	unsigned npages = 1 << order;
-	unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
+	unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
 
 	/* allocate array for page caching change */
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
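Both TTM fixes are unit conversions: one element of a huge-page pool covers 1 << order native pages, so the freed count and the remaining shrink budget must be shifted by pool->order when expressed in native pages, and max_cpages, which caps the caching array in native pages, has to scale the requested count by the order as well. A standalone sketch of the arithmetic (order 9, i.e. 2 MiB huge pages over 4 KiB base pages, and the specific counts are only examples):

#include <stdio.h>

#define NUM_PAGES_TO_ALLOC (4096 / 8) /* PAGE_SIZE / sizeof(struct page *) on x86-64 = 512 */

int main(void)
{
	unsigned int order = 9;        /* one pool element = 512 native pages */
	unsigned int shrink_elems = 2; /* pool elements the shrinker freed    */
	unsigned int count = 4;        /* huge pages requested from the pool  */

	/* freed memory is accounted in native pages, not pool elements */
	unsigned long freed_pages = (unsigned long)shrink_elems << order;

	/* max_cpages is in native pages, so the request is scaled too */
	unsigned int max_cpages = count << order;

	if (max_cpages > NUM_PAGES_TO_ALLOC)
		max_cpages = NUM_PAGES_TO_ALLOC;

	printf("freed %lu native pages, caching array sized for %u pages\n",
	       freed_pages, max_cpages);
	return 0;
}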