Merge tag 'drm-intel-next-2021-03-16' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Highlights:
- Alderlake S enabling, via topic branch (Aditya, Anusha, Caz, José, Lucas, Matt, Tejas)
- Refactor display code to shrink intel_display.c etc. (Dave)
- Support more gen 9 and Tigerlake PCH combinations (Lyude, Tejas)
- Add eDP MSO support (Jani)

Display:
- Refactor to support multiple PSR instances (Gwan-gyeong)
- Link training debug logging updates (Sean)
- Updates to eDP fixed mode handling (Jani)
- Disable PSR2 on JSL/EHL (Edmund)
- Support DDR5 and LPDDR5 for bandwidth computation (Clint, José)
- Update VBT DP max link rate table (Shawn)
- Disable the QSES check for HDCP2.2 over MST (Juston)
- PSR updates, refactoring, selective fetch (José, Gwan-gyeong)
- Display init sequence refactoring (Lucas)
- Limit LSPCON to gen 9 and 10 platforms (Ankit)
- Fix DDI lane polarity per VBT info (Uma)
- Fix HDMI vswing programming location in mode set (Ville)
- Various display improvements, refactorings and cleanups (Ville)
- Clean up DDI clock routing and readout (Ville)
- Workaround async flip + VT-d corruption on HSW/BDW (Ville)
- SAGV watermark fixes and cleanups (Ville)
- Silence pipe tracepoint WARNs (Ville)

Other:
- Remove require_force_probe protection from RKL, may need to be revisited (Tejas)
- Detect loss of MMIO access (Matt)
- GVT display improvements
- drm/i915: Disable runtime power management during shutdown (Imre)
- Perf/OA updates (Umesh)
- Remove references to struct drm_device.pdev, via topic branch (Thomas)
- Backmerge (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87v99rnk1g.fsf@intel.com
commit 06debd6e1b
@@ -551,6 +551,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_EHL_IDS(&gen11_early_ops),
INTEL_TGL_12_IDS(&gen11_early_ops),
INTEL_RKL_IDS(&gen11_early_ops),
INTEL_ADLS_IDS(&gen11_early_ops),
};

struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
@@ -223,7 +223,9 @@ i915-y += \
display/intel_sprite.o \
display/intel_tc.o \
display/intel_vga.o \
display/i9xx_plane.o
display/i9xx_plane.o \
display/skl_scaler.o \
display/skl_universal_plane.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -241,6 +243,7 @@ i915-y += \
display/icl_dsi.o \
display/intel_crt.o \
display/intel_ddi.o \
display/intel_ddi_buf_trans.o \
display/intel_dp.o \
display/intel_dp_aux.o \
display/intel_dp_aux_backlight.o \
@@ -770,10 +770,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
int num_formats;
int ret, zpos;

if (INTEL_GEN(dev_priv) >= 9)
return skl_universal_plane_create(dev_priv, pipe,
PLANE_PRIMARY);

plane = intel_plane_alloc();
if (IS_ERR(plane))
return plane;
@@ -924,3 +920,122 @@ fail:
return ERR_PTR(ret);
}

static int i9xx_format_to_fourcc(int format)
{
switch (format) {
case DISPPLANE_8BPP:
return DRM_FORMAT_C8;
case DISPPLANE_BGRA555:
return DRM_FORMAT_ARGB1555;
case DISPPLANE_BGRX555:
return DRM_FORMAT_XRGB1555;
case DISPPLANE_BGRX565:
return DRM_FORMAT_RGB565;
default:
case DISPPLANE_BGRX888:
return DRM_FORMAT_XRGB8888;
case DISPPLANE_RGBX888:
return DRM_FORMAT_XBGR8888;
case DISPPLANE_BGRA888:
return DRM_FORMAT_ARGB8888;
case DISPPLANE_RGBA888:
return DRM_FORMAT_ABGR8888;
case DISPPLANE_BGRX101010:
return DRM_FORMAT_XRGB2101010;
case DISPPLANE_RGBX101010:
return DRM_FORMAT_XBGR2101010;
case DISPPLANE_BGRA101010:
return DRM_FORMAT_ARGB2101010;
case DISPPLANE_RGBA101010:
return DRM_FORMAT_ABGR2101010;
case DISPPLANE_RGBX161616:
return DRM_FORMAT_XBGR16161616F;
}
}

void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
enum pipe pipe;
u32 val, base, offset;
int fourcc, pixel_format;
unsigned int aligned_height;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;

if (!plane->get_hw_state(plane, &pipe))
return;

drm_WARN_ON(dev, pipe != crtc->pipe);

intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
return;
}

fb = &intel_fb->base;

fb->dev = dev;

val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
fb->modifier = I915_FORMAT_MOD_X_TILED;
}

if (val & DISPPLANE_ROTATE_180)
plane_config->rotation = DRM_MODE_ROTATE_180;
}

if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
val & DISPPLANE_MIRROR)
plane_config->rotation |= DRM_MODE_REFLECT_X;

pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
fb->format = drm_format_info(fourcc);

if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
} else if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
offset = intel_de_read(dev_priv,
DSPTILEOFF(i9xx_plane));
else
offset = intel_de_read(dev_priv,
DSPLINOFF(i9xx_plane));
base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
} else {
base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
}
plane_config->base = base;

val = intel_de_read(dev_priv, PIPESRC(pipe));
fb->width = ((val >> 16) & 0xfff) + 1;
fb->height = ((val >> 0) & 0xfff) + 1;

val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
fb->pitches[0] = val & 0xffffffc0;

aligned_height = intel_fb_align_height(fb, 0, fb->height);

plane_config->size = fb->pitches[0] * aligned_height;

drm_dbg_kms(&dev_priv->drm,
"%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
crtc->base.name, plane->base.name, fb->width, fb->height,
fb->format->cpp[0] * 8, base, fb->pitches[0],
plane_config->size);

plane_config->fb = intel_fb;
}
@@ -10,6 +10,8 @@

enum pipe;
struct drm_i915_private;
struct intel_crtc;
struct intel_initial_plane_config;
struct intel_plane;
struct intel_plane_state;

@@ -21,4 +23,6 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);

void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config);
#endif
@@ -35,6 +35,8 @@
#include "intel_dsi.h"
#include "intel_panel.h"
#include "intel_vdsc.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"

static int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
@@ -653,6 +655,24 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpll.lock);
}

static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
bool clock_enabled = false;
enum phy phy;
u32 tmp;

tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);

for_each_dsi_phy(phy, intel_dsi->phys) {
if (!(tmp & ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)))
clock_enabled = true;
}

return clock_enabled;
}

static void gen11_dsi_map_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -1488,14 +1508,10 @@ static void gen11_dsi_get_cmd_mode_config(struct intel_dsi *intel_dsi,
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);

/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
pipe_config->port_clock = intel_dpll_get_freq(i915,
pipe_config->shared_dpll,
&pipe_config->dpll_hw_state);
intel_ddi_get_clock(encoder, pipe_config, icl_ddi_combo_get_pll(encoder));

pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
if (intel_dsi->dual_link)
@@ -1940,6 +1956,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->pipe_mask = ~0;
encoder->power_domain = POWER_DOMAIN_PORT_DSI;
encoder->get_power_domains = gen11_dsi_get_power_domains;
encoder->disable_clock = gen11_dsi_gate_clocks;
encoder->is_clock_enabled = gen11_dsi_is_clock_enabled;

/* register DSI connector with DRM subsystem */
drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
@@ -40,7 +40,7 @@
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "skl_universal_plane.h"

/**
* intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
@@ -1630,16 +1630,36 @@ static const u8 rkl_pch_tgp_ddc_pin_map[] = {
[RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
};

static const u8 adls_ddc_pin_map[] = {
[ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
[ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP,
[ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP,
[ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP,
[ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP,
};

static const u8 gen9bc_tgp_ddc_pin_map[] = {
[DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
[DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
[DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP,
};

static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
const u8 *ddc_pin_map;
int n_entries;

if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
if (HAS_PCH_ADP(dev_priv)) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
return vbt_pin;
} else if (IS_ROCKETLAKE(dev_priv) && INTEL_PCH_TYPE(dev_priv) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
} else if (HAS_PCH_TGP(dev_priv) && IS_GEN9_BC(dev_priv)) {
ddc_pin_map = gen9bc_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
@@ -1708,8 +1728,26 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
[PORT_TC1] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
[PORT_TC2] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
};
/*
* Alderlake S ports used in the driver are PORT_A, PORT_D, PORT_E,
* PORT_F and PORT_G, we need to map that to correct VBT sections.
*/
static const int adls_port_mapping[][3] = {
[PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
[PORT_B] = { -1 },
[PORT_C] = { -1 },
[PORT_TC1] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
[PORT_TC2] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
[PORT_TC3] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
[PORT_TC4] = { DVO_PORT_HDMIE, DVO_PORT_DPE, -1 },
};

if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
if (IS_ALDERLAKE_S(dev_priv))
return __dvo_port_to_port(ARRAY_SIZE(adls_port_mapping),
ARRAY_SIZE(adls_port_mapping[0]),
adls_port_mapping,
dvo_port);
else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping),
ARRAY_SIZE(rkl_port_mapping[0]),
rkl_port_mapping,
@@ -1721,6 +1759,44 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
dvo_port);
}

static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
{
switch (vbt_max_link_rate) {
default:
case BDB_230_VBT_DP_MAX_LINK_RATE_DEF:
return 0;
case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20:
return 2000000;
case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5:
return 1350000;
case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR10:
return 1000000;
case BDB_230_VBT_DP_MAX_LINK_RATE_HBR3:
return 810000;
case BDB_230_VBT_DP_MAX_LINK_RATE_HBR2:
return 540000;
case BDB_230_VBT_DP_MAX_LINK_RATE_HBR:
return 270000;
case BDB_230_VBT_DP_MAX_LINK_RATE_LBR:
return 162000;
}
}

static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
{
switch (vbt_max_link_rate) {
default:
case BDB_216_VBT_DP_MAX_LINK_RATE_HBR3:
return 810000;
case BDB_216_VBT_DP_MAX_LINK_RATE_HBR2:
return 540000;
case BDB_216_VBT_DP_MAX_LINK_RATE_HBR:
return 270000;
case BDB_216_VBT_DP_MAX_LINK_RATE_LBR:
return 162000;
}
}

static void parse_ddi_port(struct drm_i915_private *dev_priv,
struct display_device_data *devdata,
u8 bdb_version)
@@ -1800,7 +1876,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
/* The VBT HDMI level shift values match the table we have. */
u8 hdmi_level_shift = child->hdmi_level_shifter_value;
drm_dbg_kms(&dev_priv->drm,
"VBT HDMI level shift for port %c: %d\n",
"Port %c VBT HDMI level shift: %d\n",
port_name(port),
hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
@@ -1827,7 +1903,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,

if (max_tmds_clock)
drm_dbg_kms(&dev_priv->drm,
"VBT HDMI max TMDS clock for port %c: %d kHz\n",
"Port %c VBT HDMI max TMDS clock: %d kHz\n",
port_name(port), max_tmds_clock);
info->max_tmds_clock = max_tmds_clock;
}
@@ -1836,33 +1912,23 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 196 && child->iboost) {
info->dp_boost_level = translate_iboost(child->dp_iboost_level);
drm_dbg_kms(&dev_priv->drm,
"VBT (e)DP boost level for port %c: %d\n",
"Port %c VBT (e)DP boost level: %d\n",
port_name(port), info->dp_boost_level);
info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
drm_dbg_kms(&dev_priv->drm,
"VBT HDMI boost level for port %c: %d\n",
"Port %c VBT HDMI boost level: %d\n",
port_name(port), info->hdmi_boost_level);
}

/* DP max link rate for CNL+ */
if (bdb_version >= 216) {
switch (child->dp_max_link_rate) {
default:
case VBT_DP_MAX_LINK_RATE_HBR3:
info->dp_max_link_rate = 810000;
break;
case VBT_DP_MAX_LINK_RATE_HBR2:
info->dp_max_link_rate = 540000;
break;
case VBT_DP_MAX_LINK_RATE_HBR:
info->dp_max_link_rate = 270000;
break;
case VBT_DP_MAX_LINK_RATE_LBR:
info->dp_max_link_rate = 162000;
break;
}
if (bdb_version >= 230)
info->dp_max_link_rate = parse_bdb_230_dp_max_link_rate(child->dp_max_link_rate);
else
info->dp_max_link_rate = parse_bdb_216_dp_max_link_rate(child->dp_max_link_rate);

drm_dbg_kms(&dev_priv->drm,
"VBT DP max link rate for port %c: %d\n",
"Port %c VBT DP max link rate: %d\n",
port_name(port), info->dp_max_link_rate);
}

@@ -2098,7 +2164,7 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)

static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
void __iomem *p = NULL, *oprom;
struct vbt_header *vbt;
u16 vbt_size;
@@ -2645,6 +2711,23 @@ intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
return HAS_LSPCON(i915) && child && child->lspcon;
}

/**
* intel_bios_is_lane_reversal_needed - if lane reversal needed on port
* @i915: i915 device instance
* @port: port to check
*
* Return true if port requires lane reversal
*/
bool
intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum port port)
{
const struct child_device_config *child =
i915->vbt.ddi_port_info[port].child;

return child && child->lane_reversal;
}

enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -2661,27 +2744,44 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
return aux_ch;
}

/*
* RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
* map to DDI A,B,TC1,TC2 respectively.
*
* ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E
* map to DDI A,TC1,TC2,TC3,TC4 respectively.
*/
switch (info->alternate_aux_channel) {
case DP_AUX_A:
aux_ch = AUX_CH_A;
break;
case DP_AUX_B:
aux_ch = AUX_CH_B;
if (IS_ALDERLAKE_S(dev_priv))
aux_ch = AUX_CH_USBC1;
else
aux_ch = AUX_CH_B;
break;
case DP_AUX_C:
/*
* RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
* map to DDI A,B,TC1,TC2 respectively.
*/
aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ?
AUX_CH_USBC1 : AUX_CH_C;
if (IS_ALDERLAKE_S(dev_priv))
aux_ch = AUX_CH_USBC2;
else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
aux_ch = AUX_CH_USBC1;
else
aux_ch = AUX_CH_C;
break;
case DP_AUX_D:
aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ?
AUX_CH_USBC2 : AUX_CH_D;
if (IS_ALDERLAKE_S(dev_priv))
aux_ch = AUX_CH_USBC3;
else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
aux_ch = AUX_CH_USBC2;
else
aux_ch = AUX_CH_D;
break;
case DP_AUX_E:
aux_ch = AUX_CH_E;
if (IS_ALDERLAKE_S(dev_priv))
aux_ch = AUX_CH_USBC4;
else
aux_ch = AUX_CH_E;
break;
case DP_AUX_F:
aux_ch = AUX_CH_F;
@@ -241,6 +241,8 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port);
bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port);
bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum port port);
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
@@ -78,7 +78,17 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->num_points = dram_info->num_qgv_points;

if (IS_GEN(dev_priv, 12))
qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16;
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = 4;
break;
case INTEL_DRAM_DDR5:
qi->t_bl = 8;
break;
default:
qi->t_bl = 16;
break;
}
else if (IS_GEN(dev_priv, 11))
qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;

@@ -142,6 +152,12 @@ static const struct intel_sa_info rkl_sa_info = {
.displayrtids = 128,
};

static const struct intel_sa_info adls_sa_info = {
.deburst = 16,
.deprogbwlimit = 38, /* GB/s */
.displayrtids = 256,
};

static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
@@ -251,7 +267,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;

if (IS_ROCKETLAKE(dev_priv))
if (IS_ALDERLAKE_S(dev_priv))
icl_get_bw_info(dev_priv, &adls_sa_info);
else if (IS_ROCKETLAKE(dev_priv))
icl_get_bw_info(dev_priv, &rkl_sa_info);
else if (IS_GEN(dev_priv, 12))
icl_get_bw_info(dev_priv, &tgl_sa_info);
@@ -96,7 +96,7 @@ static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 hpllcc = 0;

/*
@@ -138,7 +138,7 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 gcfgc = 0;

pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -162,7 +162,7 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 gcfgc = 0;

pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -256,7 +256,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
static void g33_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
@@ -305,7 +305,7 @@ fail:
static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u16 gcfgc = 0;

pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -339,7 +339,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
static const u8 div_3200[] = { 16, 10, 8 };
static const u8 div_4000[] = { 20, 12, 10 };
static const u8 div_5333[] = { 24, 16, 14 };
@@ -384,7 +384,7 @@ fail:
static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
unsigned int cdclk_sel;
u16 tmp = 0;

@@ -2145,10 +2145,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
if (IS_ERR(bw_state))
return PTR_ERR(bw_state);

if (cdclk_state->min_cdclk[i] == min_cdclk)
if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk)
continue;

cdclk_state->min_cdclk[i] = min_cdclk;
cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;

ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
@@ -2199,10 +2199,10 @@ static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state)
else
min_voltage_level = 0;

if (cdclk_state->min_voltage_level[i] == min_voltage_level)
if (cdclk_state->min_voltage_level[crtc->pipe] == min_voltage_level)
continue;

cdclk_state->min_voltage_level[i] = min_voltage_level;
cdclk_state->min_voltage_level[crtc->pipe] = min_voltage_level;

ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
@@ -187,10 +187,16 @@ static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
* Some platforms only expect PHY_MISC to be programmed for PHY-A and
* PHY-B and may not even have instances of the register for the
* other combo PHY's.
*
* ADL-S technically has three instances of PHY_MISC, but only requires
* that we program it for PHY A.
*/
if (IS_JSL_EHL(i915) ||
IS_ROCKETLAKE(i915) ||
IS_DG1(i915))

if (IS_ALDERLAKE_S(i915))
return phy == PHY_A;
else if (IS_JSL_EHL(i915) ||
IS_ROCKETLAKE(i915) ||
IS_DG1(i915))
return phy < PHY_C;

return true;
@@ -246,14 +252,21 @@ static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
* RKL,DG1:
* A(master) -> B(slave)
* C(master) -> D(slave)
* ADL-S:
* A(master) -> B(slave), C(slave)
* D(master) -> E(slave)
*
* We must set the IREFGEN bit for any PHY acting as a master
* to another PHY.
*/
if ((IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) && phy == PHY_C)
if (phy == PHY_A)
return true;
else if (IS_ALDERLAKE_S(dev_priv))
return phy == PHY_D;
else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
return phy == PHY_C;

return phy == PHY_A;
return false;
}

static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
@@ -38,6 +38,7 @@
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
@@ -141,7 +142,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

intel_ddi_get_config(encoder, pipe_config);
hsw_ddi_get_config(encoder, pipe_config);

pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC |
@@ -1075,6 +1076,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.enable = hsw_enable_crt;
crt->base.disable = hsw_disable_crt;
crt->base.post_disable = hsw_post_disable_crt;
crt->base.enable_clock = hsw_ddi_enable_clock;
crt->base.disable_clock = hsw_ddi_disable_clock;
crt->base.is_clock_enabled = hsw_ddi_is_clock_enabled;
} else {
if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.compute_config = pch_crt_compute_config;
@@ -10,6 +10,9 @@
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>

#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_color.h"
@@ -17,9 +20,13 @@
#include "intel_cursor.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_pipe_crc.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_vrr.h"
#include "i9xx_plane.h"
#include "skl_universal_plane.h"

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
@@ -32,6 +39,9 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];

if (!crtc->active)
return 0;

if (!vblank->max_vblank_count)
return (u32)drm_crtc_accurate_vblank_count(&crtc->base);

@@ -41,8 +51,6 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 mode_flags = crtc->mode_flags;

/*
* From Gen 11, In case of dsi cmd mode, frame counter wouldnt
@@ -50,7 +58,8 @@ u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
* the hw counter, then we would find it updated in only
* the next TE, hence switching to sw counter.
*/
if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 |
I915_MODE_FLAG_DSI_USE_TE1))
return 0;

/*
@@ -77,12 +86,26 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
drm_crtc_set_max_vblank_count(&crtc->base,
intel_crtc_max_vblank_count(crtc_state));
drm_crtc_vblank_on(&crtc->base);

/*
* Should really happen exactly when we enable the pipe
* but we want the frame counters in the trace, and that
* requires vblank support on some platforms/outputs.
*/
trace_intel_pipe_enable(crtc);
}

void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

/*
* Should really happen exactly when we disable the pipe
* but we want the frame counters in the trace, and that
* requires vblank support on some platforms/outputs.
*/
trace_intel_pipe_disable(crtc);

drm_crtc_vblank_off(&crtc->base);
assert_vblank_disabled(&crtc->base);
}
@@ -242,7 +265,11 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
crtc->pipe = pipe;
crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

primary = intel_primary_plane_create(dev_priv, pipe);
if (INTEL_GEN(dev_priv) >= 9)
primary = skl_universal_plane_create(dev_priv, pipe,
PLANE_PRIMARY);
else
primary = intel_primary_plane_create(dev_priv, pipe);
if (IS_ERR(primary)) {
ret = PTR_ERR(primary);
goto fail;
@@ -252,7 +279,11 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;

plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
if (INTEL_GEN(dev_priv) >= 9)
plane = skl_universal_plane_create(dev_priv, pipe,
PLANE_SPRITE0 + sprite);
else
plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto fail;
@@ -322,3 +353,238 @@ fail:

return ret;
}

int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
/* paranoia */
if (!adjusted_mode->crtc_htotal)
return 1;

return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
1000 * adjusted_mode->crtc_htotal);
}

static int intel_mode_vblank_start(const struct drm_display_mode *mode)
{
int vblank_start = mode->crtc_vblank_start;

if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vblank_start = DIV_ROUND_UP(vblank_start, 2);

return vblank_start;
}

/**
* intel_pipe_update_start() - start update of a set of display registers
* @new_crtc_state: the new crtc state
*
* Mark the start of an update to pipe registers that should be updated
* atomically regarding vblank. If the next vblank will happens within
* the next 100 us, this function waits until the vblank passes.
*
* After a successful call to this function, interrupts will be disabled
* until a subsequent call to intel_pipe_update_end(). That is done to
* avoid random delays.
*/
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);

if (new_crtc_state->uapi.async_flip)
return;

if (new_crtc_state->vrr.enable)
vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
else
vblank_start = intel_mode_vblank_start(adjusted_mode);

/* FIXME needs to be calibrated sensibly */
min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
VBLANK_EVASION_TIME_US);
max = vblank_start - 1;

if (min <= 0 || max <= 0)
goto irq_disable;

if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
goto irq_disable;

/*
* Wait for psr to idle out after enabling the VBL interrupts
* VBL interrupts will start the PSR exit and prevent a PSR
* re-entry as well.
*/
intel_psr_wait_for_idle(new_crtc_state);

local_irq_disable();

crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
trace_intel_pipe_update_start(crtc);

for (;;) {
/*
* prepare_to_wait() has a memory barrier, which guarantees
* other CPUs can see the task state update by the time we
* read the scanline.
*/
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);

scanline = intel_get_crtc_scanline(crtc);
if (scanline < min || scanline > max)
break;

if (!timeout) {
drm_err(&dev_priv->drm,
"Potential atomic update failure on pipe %c\n",
pipe_name(crtc->pipe));
break;
}

local_irq_enable();

timeout = schedule_timeout(timeout);

local_irq_disable();
}

finish_wait(wq, &wait);

drm_crtc_vblank_put(&crtc->base);

/*
* On VLV/CHV DSI the scanline counter would appear to
* increment approx. 1/3 of a scanline before start of vblank.
* The registers still get latched at start of vblank however.
* This means we must not write any registers on the first
* line of vblank (since not the whole line is actually in
* vblank). And unfortunately we can't use the interrupt to
* wait here since it will fire too soon. We could use the
* frame start interrupt instead since it will fire after the
* critical scanline, but that would require more changes
* in the interrupt code. So for now we'll just do the nasty
* thing and poll for the bad scanline to pass us by.
*
* FIXME figure out if BXT+ DSI suffers from this as well
*/
while (need_vlv_dsi_wa && scanline == vblank_start)
scanline = intel_get_crtc_scanline(crtc);

crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);

trace_intel_pipe_update_vblank_evaded(crtc);
return;

irq_disable:
local_irq_disable();
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
{
u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
unsigned int h;

h = ilog2(delta >> 9);
if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
crtc->debug.vbl.times[h]++;

crtc->debug.vbl.sum += delta;
if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
crtc->debug.vbl.min = delta;
if (delta > crtc->debug.vbl.max)
crtc->debug.vbl.max = delta;

if (delta > 1000 * VBLANK_EVASION_TIME_US) {
drm_dbg_kms(crtc->base.dev,
"Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
pipe_name(crtc->pipe),
div_u64(delta, 1000),
VBLANK_EVASION_TIME_US);
crtc->debug.vbl.over++;
}
}
#else
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
#endif

/**
* intel_pipe_update_end() - end update of a set of display registers
* @new_crtc_state: the new crtc state
*
* Mark the end of an update started with intel_pipe_update_start(). This
* re-enables interrupts and verifies the update was actually completed
* before a vblank.
*/
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
ktime_t end_vbl_time = ktime_get();
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

if (new_crtc_state->uapi.async_flip)
return;

trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);

/*
* Incase of mipi dsi command mode, we need to set frame update
* request for every commit.
*/
if (INTEL_GEN(dev_priv) >= 11 &&
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
icl_dsi_frame_update(new_crtc_state);

/* We're still in the vblank-evade critical section, this can't race.
* Would be slightly nice to just grab the vblank count and arm the
* event outside of the critical section - the spinlock might spin for a
* while ... */
if (new_crtc_state->uapi.event) {
drm_WARN_ON(&dev_priv->drm,
drm_crtc_vblank_get(&crtc->base) != 0);

spin_lock(&crtc->base.dev->event_lock);
drm_crtc_arm_vblank_event(&crtc->base,
new_crtc_state->uapi.event);
spin_unlock(&crtc->base.dev->event_lock);

new_crtc_state->uapi.event = NULL;
}

local_irq_enable();

/* Send VRR Push to terminate Vblank */
intel_vrr_send_push(new_crtc_state);

if (intel_vgpu_active(dev_priv))
return;

if (crtc->debug.start_vbl_count &&
crtc->debug.start_vbl_count != end_vbl_count) {
drm_err(&dev_priv->drm,
"Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
pipe_name(pipe), crtc->debug.start_vbl_count,
end_vbl_count,
ktime_us_delta(end_vbl_time,
crtc->debug.start_vbl_time),
crtc->debug.min_vbl, crtc->debug.max_vbl,
crtc->debug.scanline_start, scanline_end);
}

dbg_vblank_evade(crtc, end_vbl_time);
}
@@ -40,6 +40,10 @@

#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE

#define ADLS_CSR_PATH "i915/adls_dmc_ver2_01.bin"
#define ADLS_CSR_VERSION_REQUIRED CSR_VERSION(2, 1)
MODULE_FIRMWARE(ADLS_CSR_PATH);

#define DG1_CSR_PATH "i915/dg1_dmc_ver2_02.bin"
#define DG1_CSR_VERSION_REQUIRED CSR_VERSION(2, 2)
MODULE_FIRMWARE(DG1_CSR_PATH);
@@ -640,7 +644,7 @@ static void csr_load_work_fn(struct work_struct *work)
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
csr = &dev_priv->csr;

request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev);
request_firmware(&fw, dev_priv->csr.fw_path, dev_priv->drm.dev);
parse_csr_fw(dev_priv, fw);

if (dev_priv->csr.dmc_payload) {
@@ -689,7 +693,11 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_csr_runtime_pm_get(dev_priv);

if (IS_DG1(dev_priv)) {
if (IS_ALDERLAKE_S(dev_priv)) {
csr->fw_path = ADLS_CSR_PATH;
csr->required_version = ADLS_CSR_VERSION_REQUIRED;
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
} else if (IS_DG1(dev_priv)) {
csr->fw_path = DG1_CSR_PATH;
csr->required_version = DG1_CSR_VERSION_REQUIRED;
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
File diff suppressed because it is too large.
@@ -17,6 +17,7 @@ struct intel_crtc_state;
struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;
struct intel_shared_dpll;
enum transcoder;

i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
@@ -27,8 +28,22 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct intel_shared_dpll *pll);
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_disable_clock(struct intel_encoder *encoder);
bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder);
void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
@@ -40,8 +55,6 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
@@ -53,6 +66,6 @@ u32 ddi_signal_levels(struct intel_dp *intel_dp,
int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
enum transcoder cpu_transcoder,
bool enable, u32 hdcp_mask);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);

#endif /* __INTEL_DDI_H__ */
drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c (new file, 1394 lines; diff suppressed because it is too large)
drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h (new file, 100 lines)
@@ -0,0 +1,100 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2020 Intel Corporation
*/

#ifndef _INTEL_DDI_BUF_TRANS_H_
#define _INTEL_DDI_BUF_TRANS_H_

#include <linux/types.h>

struct drm_i915_private;
struct intel_encoder;
struct intel_crtc_state;

struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
u32 trans2; /* vref sel, vswing */
u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
};

struct bxt_ddi_buf_trans {
u8 margin; /* swing value */
u8 scale; /* scale value */
u8 enable; /* scale enable */
u8 deemphasis;
};

struct cnl_ddi_buf_trans {
u8 dw2_swing_sel;
u8 dw7_n_scalar;
u8 dw4_cursor_coeff;
u8 dw4_post_cursor_2;
u8 dw4_post_cursor_1;
};

struct icl_mg_phy_ddi_buf_trans {
u32 cri_txdeemph_override_11_6;
u32 cri_txdeemph_override_5_0;
u32 cri_txdeemph_override_17_12;
};

struct tgl_dkl_phy_ddi_buf_trans {
u32 dkl_vswing_control;
u32 dkl_preshoot_control;
u32 dkl_de_emphasis_control;
};

bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table);

int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *default_entry);

const struct ddi_buf_trans *
intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries);
const struct ddi_buf_trans *
intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
int *n_entries);
const struct ddi_buf_trans *
intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
int *n_entries);
const struct ddi_buf_trans *
intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries);

const struct bxt_ddi_buf_trans *
bxt_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);

const struct cnl_ddi_buf_trans *
tgl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
const struct tgl_dkl_phy_ddi_buf_trans *
tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
const struct cnl_ddi_buf_trans *
jsl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
const struct cnl_ddi_buf_trans *
ehl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
const struct cnl_ddi_buf_trans *
icl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
const struct icl_mg_phy_ddi_buf_trans *
icl_get_mg_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);

const struct cnl_ddi_buf_trans *
cnl_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);

#endif
File diff suppressed because it is too large.
@@ -52,6 +52,7 @@ struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_encoder;
struct intel_initial_plane_config;
struct intel_load_detect_pipe;
struct intel_plane;
struct intel_plane_state;
@@ -352,11 +353,6 @@ enum phy_fia {
for_each_cpu_transcoder(__dev_priv, __t) \
for_each_if ((__mask) & BIT(__t))

#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
(__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
(__p)++)

#define for_each_sprite(__dev_priv, __p, __s) \
for ((__s) = 0; \
(__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \
@@ -417,10 +413,19 @@ enum phy_fia {
for_each_if((encoder_mask) & \
drm_encoder_mask(&intel_encoder->base))

#define for_each_intel_encoder_mask_with_psr(dev, intel_encoder, encoder_mask) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
for_each_if(((encoder_mask) & drm_encoder_mask(&(intel_encoder)->base)) && \
intel_encoder_can_psr(intel_encoder))

#define for_each_intel_dp(dev, intel_encoder) \
for_each_intel_encoder(dev, intel_encoder) \
for_each_if(intel_encoder_is_dp(intel_encoder))

#define for_each_intel_encoder_with_psr(dev, intel_encoder) \
for_each_intel_encoder((dev), (intel_encoder)) \
for_each_if(intel_encoder_can_psr(intel_encoder))

#define for_each_intel_connector_iter(intel_connector, iter) \
while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))

@@ -507,8 +512,6 @@ void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n, bool fec_enable);
bool is_ccs_modifier(u64 modifier);
int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
@@ -586,9 +589,6 @@ void intel_cleanup_plane_fb(struct drm_plane *plane,
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);

int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
bool intel_fuzzy_clock_check(int clock1, int clock2);

@@ -613,25 +613,8 @@ enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);

u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set);
void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
int id, int set, enum drm_scaling_filter filter);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int plane);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
int *x, int *y, u32 *offset);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);

int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);

@@ -653,12 +636,21 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state);

unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
int color_plane);
void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
const struct drm_framebuffer *fb,
int color_plane);
u32 intel_plane_adjust_aligned_offset(int *x, int *y,
const struct intel_plane_state *state,
int color_plane,
u32 old_offset, u32 new_offset);
unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane);
unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane);

void intel_display_driver_register(struct drm_i915_private *i915);
void intel_display_driver_unregister(struct drm_i915_private *i915);

/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
@@ -249,12 +249,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
"sink internal error",
};
struct drm_connector *connector = m->private;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_dp *intel_dp =
intel_attached_dp(to_intel_connector(connector));
int ret;

if (!CAN_PSR(dev_priv)) {
if (!CAN_PSR(intel_dp)) {
seq_puts(m, "PSR Unsupported\n");
return -ENODEV;
}
@@ -280,12 +279,13 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);

static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
u32 val, status_val;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const char *status = "unknown";
u32 val, status_val;

if (dev_priv->psr.psr2_enabled) {
if (intel_dp->psr.psr2_enabled) {
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
@@ -300,7 +300,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"TG_ON"
};
val = intel_de_read(dev_priv,
EDP_PSR2_STATUS(dev_priv->psr.transcoder));
EDP_PSR2_STATUS(intel_dp->psr.transcoder));
status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
EDP_PSR2_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -317,7 +317,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"SRDENT_ON",
};
val = intel_de_read(dev_priv,
EDP_PSR_STATUS(dev_priv->psr.transcoder));
EDP_PSR_STATUS(intel_dp->psr.transcoder));
status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
EDP_PSR_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@ -327,21 +327,18 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
|
||||
seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
|
||||
}
|
||||
|
||||
static int i915_edp_psr_status(struct seq_file *m, void *data)
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct i915_psr *psr = &dev_priv->psr;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
intel_wakeref_t wakeref;
const char *status;
bool enabled;
u32 val;

if (!HAS_PSR(dev_priv))
return -ENODEV;

seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
if (psr->dp)
seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
if (psr->sink_support)
seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
seq_puts(m, "\n");

if (!psr->sink_support)
@ -365,16 +362,16 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)

if (psr->psr2_enabled) {
val = intel_de_read(dev_priv,
EDP_PSR2_CTL(dev_priv->psr.transcoder));
EDP_PSR2_CTL(intel_dp->psr.transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
val = intel_de_read(dev_priv,
EDP_PSR_CTL(dev_priv->psr.transcoder));
EDP_PSR_CTL(intel_dp->psr.transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
enableddisabled(enabled), val);
psr_source_status(dev_priv, m);
psr_source_status(intel_dp, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
psr->busy_frontbuffer_bits);

@ -383,7 +380,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = intel_de_read(dev_priv,
EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
val &= EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance counter: %u\n", val);
}
@ -404,7 +401,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
*/
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
val = intel_de_read(dev_priv,
PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
su_frames_val[frame / 3] = val;
}

@ -430,23 +427,50 @@ unlock:
return 0;
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_dp *intel_dp = NULL;
struct intel_encoder *encoder;

if (!HAS_PSR(dev_priv))
return -ENODEV;

/* Find the first EDP which supports PSR */
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
intel_dp = enc_to_intel_dp(encoder);
break;
}

if (!intel_dp)
return -ENODEV;

return intel_psr_status(m, intel_dp);
}

static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct intel_encoder *encoder;
intel_wakeref_t wakeref;
int ret;
int ret = -ENODEV;

if (!CAN_PSR(dev_priv))
return -ENODEV;
if (!HAS_PSR(dev_priv))
return ret;

drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);

ret = intel_psr_debug_set(dev_priv, val);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
// TODO: split to each transcoder's PSR debug state
ret = intel_psr_debug_set(intel_dp, val);

intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}

return ret;
}
@ -455,12 +479,20 @@ static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
struct intel_encoder *encoder;

if (!CAN_PSR(dev_priv))
if (!HAS_PSR(dev_priv))
return -ENODEV;

*val = READ_ONCE(dev_priv->psr.debug);
return 0;
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

// TODO: split to each transcoder's PSR debug state
*val = READ_ONCE(intel_dp->psr.debug);
return 0;
}

return -ENODEV;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
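
Taken together, the debugfs hunks above stop going through the single dev_priv->psr instance and instead walk the encoders that support PSR. A minimal sketch of that lookup, mirroring the loop in the new i915_edp_psr_status(); the helper name is made up for illustration, while the iterator and enc_to_intel_dp() are the ones used in the diff:

/* Illustrative only: return the first PSR-capable DP encoder, the way
 * the new i915_edp_psr_status() above picks one.
 */
static struct intel_dp *first_psr_capable_dp(struct drm_i915_private *i915)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder_with_psr(&i915->drm, encoder)
                return enc_to_intel_dp(encoder);

        return NULL;
}
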
@ -1062,8 +1094,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)

seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
pll->info->id);
seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
seq_printf(m, " tracked hardware state:\n");
seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
seq_printf(m, " dpll_md: 0x%08x\n",
@ -1229,9 +1261,6 @@ static void drrs_status_per_crtc(struct seq_file *m,
/* disable_drrs() will make drrs->dp NULL */
if (!drrs->dp) {
seq_puts(m, "Idleness DRRS: Disabled\n");
if (dev_priv->psr.enabled)
seq_puts(m,
"\tAs PSR is enabled, DRRS is not enabled\n");
mutex_unlock(&drrs->mutex);
return;
}
@ -2165,19 +2194,40 @@ DEFINE_SHOW_ATTRIBUTE(i915_panel);
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
int ret;

if (connector->status != connector_status_connected)
return -ENODEV;
ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
if (ret)
return ret;

if (!connector->encoder || connector->status != connector_status_connected) {
ret = -ENODEV;
goto out;
}

seq_printf(m, "%s:%d HDCP version: ", connector->name,
connector->base.id);
intel_hdcp_info(m, intel_connector);

return 0;
out:
drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

return ret;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);

static int i915_psr_status_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
intel_attached_dp(to_intel_connector(connector));

return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);

#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
seq_puts(m, "LPSP: incapable\n"))

@ -2353,6 +2403,12 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
connector, &i915_psr_sink_status_fops);
}

if (HAS_PSR(dev_priv) &&
connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}

if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
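
For reference, the i915_psr_status_fops registered above comes from DEFINE_SHOW_ATTRIBUTE(i915_psr_status), the generic seq_file helper in <linux/seq_file.h>. It roughly expands to the following (approximate expansion; see the header for the authoritative definition):

/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(i915_psr_status). */
static int i915_psr_status_open(struct inode *inode, struct file *file)
{
        return single_open(file, i915_psr_status_show, inode->i_private);
}

static const struct file_operations i915_psr_status_fops = {
        .owner   = THIS_MODULE,
        .open    = i915_psr_status_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};
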
@ -2886,24 +2886,24 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
|
||||
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
|
||||
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
|
||||
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_D) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_E) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_F) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_G) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_H) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_I) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
|
||||
BIT_ULL(POWER_DOMAIN_VGA) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUDIO) | \
|
||||
BIT_ULL(POWER_DOMAIN_INIT))
|
||||
@ -2921,18 +2921,12 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
|
||||
BIT_ULL(POWER_DOMAIN_AUX_C) | \
|
||||
BIT_ULL(POWER_DOMAIN_INIT))
|
||||
|
||||
#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
|
||||
#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
|
||||
#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
|
||||
#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
|
||||
#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
|
||||
#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
|
||||
#define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
|
||||
#define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
|
||||
#define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
|
||||
#define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
|
||||
#define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5)
|
||||
#define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6)
|
||||
|
||||
#define TGL_AUX_A_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
|
||||
@ -2941,44 +2935,34 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
|
||||
BIT_ULL(POWER_DOMAIN_AUX_B))
|
||||
#define TGL_AUX_C_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_C))
|
||||
#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_D))
|
||||
#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_E))
|
||||
#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_F))
|
||||
#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_G))
|
||||
#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_H))
|
||||
#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_I))
|
||||
#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
|
||||
#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
|
||||
#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
|
||||
#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
|
||||
#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
|
||||
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
|
||||
|
||||
#define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1)
|
||||
#define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2)
|
||||
#define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3)
|
||||
#define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4)
|
||||
#define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5)
|
||||
#define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6)
|
||||
|
||||
#define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1)
|
||||
#define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2)
|
||||
#define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3)
|
||||
#define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4)
|
||||
#define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5)
|
||||
#define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6)
|
||||
|
||||
#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_D) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_E) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_F) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_G) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_H) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_I) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
|
||||
BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
|
||||
|
||||
#define RKL_PW_4_POWER_DOMAINS ( \
|
||||
@ -2994,10 +2978,10 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
|
||||
BIT_ULL(POWER_DOMAIN_AUDIO) | \
|
||||
BIT_ULL(POWER_DOMAIN_VGA) | \
|
||||
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_D) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_E) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
|
||||
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
|
||||
BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
|
||||
BIT_ULL(POWER_DOMAIN_INIT))
|
||||
|
||||
/*
|
||||
@ -4145,8 +4129,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
}
|
||||
},
|
||||
{
|
||||
.name = "DDI D TC1 IO",
|
||||
.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
|
||||
.name = "DDI IO TC1",
|
||||
.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
|
||||
.ops = &hsw_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4155,8 +4139,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "DDI E TC2 IO",
|
||||
.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
|
||||
.name = "DDI IO TC2",
|
||||
.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
|
||||
.ops = &hsw_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4165,8 +4149,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "DDI F TC3 IO",
|
||||
.domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
|
||||
.name = "DDI IO TC3",
|
||||
.domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
|
||||
.ops = &hsw_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4175,8 +4159,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "DDI G TC4 IO",
|
||||
.domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
|
||||
.name = "DDI IO TC4",
|
||||
.domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
|
||||
.ops = &hsw_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4185,8 +4169,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "DDI H TC5 IO",
|
||||
.domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
|
||||
.name = "DDI IO TC5",
|
||||
.domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
|
||||
.ops = &hsw_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4195,8 +4179,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "DDI I TC6 IO",
|
||||
.domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
|
||||
.name = "DDI IO TC6",
|
||||
.domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
|
||||
.ops = &hsw_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4241,8 +4225,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX D TC1",
|
||||
.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
|
||||
.name = "AUX USBC1",
|
||||
.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4252,8 +4236,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX E TC2",
|
||||
.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
|
||||
.name = "AUX USBC2",
|
||||
.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4263,8 +4247,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX F TC3",
|
||||
.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
|
||||
.name = "AUX USBC3",
|
||||
.domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4274,8 +4258,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX G TC4",
|
||||
.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
|
||||
.name = "AUX USBC4",
|
||||
.domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4285,8 +4269,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX H TC5",
|
||||
.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
|
||||
.name = "AUX USBC5",
|
||||
.domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4296,8 +4280,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX I TC6",
|
||||
.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
|
||||
.name = "AUX USBC6",
|
||||
.domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4307,8 +4291,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX D TBT1",
|
||||
.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
|
||||
.name = "AUX TBT1",
|
||||
.domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4318,8 +4302,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX E TBT2",
|
||||
.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
|
||||
.name = "AUX TBT2",
|
||||
.domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4329,8 +4313,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX F TBT3",
|
||||
.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
|
||||
.name = "AUX TBT3",
|
||||
.domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4340,8 +4324,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX G TBT4",
|
||||
.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
|
||||
.name = "AUX TBT4",
|
||||
.domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4351,8 +4335,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX H TBT5",
|
||||
.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
|
||||
.name = "AUX TBT5",
|
||||
.domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4362,8 +4346,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX I TBT6",
|
||||
.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
|
||||
.name = "AUX TBT6",
|
||||
.domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4471,8 +4455,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
|
||||
}
|
||||
},
|
||||
{
|
||||
.name = "DDI D TC1 IO",
|
||||
.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
|
||||
.name = "DDI IO TC1",
|
||||
.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
|
||||
.ops = &hsw_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4481,8 +4465,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "DDI E TC2 IO",
|
||||
.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
|
||||
.name = "DDI IO TC2",
|
||||
.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
|
||||
.ops = &hsw_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4511,8 +4495,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX D TC1",
|
||||
.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
|
||||
.name = "AUX USBC1",
|
||||
.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4521,8 +4505,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "AUX E TC2",
|
||||
.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
|
||||
.name = "AUX USBC2",
|
||||
.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
|
||||
.ops = &icl_aux_power_well_ops,
|
||||
.id = DISP_PW_ID_NONE,
|
||||
{
|
||||
@ -4689,7 +4673,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
if (IS_DG1(dev_priv)) {
if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) {
err = set_power_wells_mask(power_domains, tgl_power_wells,
BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
} else if (IS_ROCKETLAKE(dev_priv)) {
@ -5317,17 +5301,25 @@ struct buddy_page_mask {
|
||||
|
||||
static const struct buddy_page_mask tgl_buddy_page_masks[] = {
|
||||
{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
|
||||
{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
|
||||
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
|
||||
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
|
||||
{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
|
||||
{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
|
||||
{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
|
||||
{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
|
||||
{}
|
||||
};
|
||||
|
||||
static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
|
||||
{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
|
||||
{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
|
||||
{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
|
||||
{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
|
||||
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
|
||||
{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
|
||||
{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
|
||||
{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
|
||||
{}
|
||||
};
|
||||
|
||||
@ -5339,9 +5331,10 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
int config, i;

if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
/* Wa_1409767108:tgl,dg1 */
if (IS_ALDERLAKE_S(dev_priv) ||
IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_B0))
/* Wa_1409767108:tgl,dg1,adl-s */
table = wa_1409767108_buddy_page_masks;
else
table = tgl_buddy_page_masks;
@ -5379,7 +5372,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,

gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

/* Wa_14011294188:ehl,jsl,tgl,rkl */
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
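
The Wa_1409767108 change above only affects which page-mask table is chosen; the rest of tgl_bw_buddy_init() then looks up the entry whose channel count and DRAM type match the detected memory configuration. A hedged sketch of that table walk; the helper and the exact field types are assumptions, the field names follow the buddy_page_mask entries shown just above:

/* Illustrative sketch of the tgl_bw_buddy_init() table walk: find the
 * entry matching the detected channel count and DRAM type. The empty
 * terminating entry has a zero page_mask.
 */
static unsigned int lookup_buddy_page_mask(const struct buddy_page_mask *table,
                                           int num_channels, int dram_type)
{
        int i;

        for (i = 0; table[i].page_mask != 0; i++) {
                if (table[i].num_channels == num_channels &&
                    table[i].type == dram_type)
                        return table[i].page_mask;
        }

        return 0;
}
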
|
@ -41,6 +41,14 @@ enum intel_display_power_domain {
|
||||
POWER_DOMAIN_PORT_DDI_G_LANES,
|
||||
POWER_DOMAIN_PORT_DDI_H_LANES,
|
||||
POWER_DOMAIN_PORT_DDI_I_LANES,
|
||||
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_D_LANES, /* tgl+ */
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC2,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC3,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC4,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC5,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC6,
|
||||
|
||||
POWER_DOMAIN_PORT_DDI_A_IO,
|
||||
POWER_DOMAIN_PORT_DDI_B_IO,
|
||||
POWER_DOMAIN_PORT_DDI_C_IO,
|
||||
@ -50,6 +58,14 @@ enum intel_display_power_domain {
|
||||
POWER_DOMAIN_PORT_DDI_G_IO,
|
||||
POWER_DOMAIN_PORT_DDI_H_IO,
|
||||
POWER_DOMAIN_PORT_DDI_I_IO,
|
||||
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_D_IO, /* tgl+ */
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC2,
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC3,
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC4,
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC5,
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC6,
|
||||
|
||||
POWER_DOMAIN_PORT_DSI,
|
||||
POWER_DOMAIN_PORT_CRT,
|
||||
POWER_DOMAIN_PORT_OTHER,
|
||||
@ -64,6 +80,14 @@ enum intel_display_power_domain {
|
||||
POWER_DOMAIN_AUX_G,
|
||||
POWER_DOMAIN_AUX_H,
|
||||
POWER_DOMAIN_AUX_I,
|
||||
|
||||
POWER_DOMAIN_AUX_USBC1 = POWER_DOMAIN_AUX_D, /* tgl+ */
|
||||
POWER_DOMAIN_AUX_USBC2,
|
||||
POWER_DOMAIN_AUX_USBC3,
|
||||
POWER_DOMAIN_AUX_USBC4,
|
||||
POWER_DOMAIN_AUX_USBC5,
|
||||
POWER_DOMAIN_AUX_USBC6,
|
||||
|
||||
POWER_DOMAIN_AUX_IO_A,
|
||||
POWER_DOMAIN_AUX_C_TBT,
|
||||
POWER_DOMAIN_AUX_D_TBT,
|
||||
@ -72,6 +96,14 @@ enum intel_display_power_domain {
|
||||
POWER_DOMAIN_AUX_G_TBT,
|
||||
POWER_DOMAIN_AUX_H_TBT,
|
||||
POWER_DOMAIN_AUX_I_TBT,
|
||||
|
||||
POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_D_TBT, /* tgl+ */
|
||||
POWER_DOMAIN_AUX_TBT2,
|
||||
POWER_DOMAIN_AUX_TBT3,
|
||||
POWER_DOMAIN_AUX_TBT4,
|
||||
POWER_DOMAIN_AUX_TBT5,
|
||||
POWER_DOMAIN_AUX_TBT6,
|
||||
|
||||
POWER_DOMAIN_GMBUS,
|
||||
POWER_DOMAIN_MODESET,
|
||||
POWER_DOMAIN_GT_IRQ,
|
||||
|
@ -37,6 +37,7 @@
|
||||
#include <drm/drm_dp_mst_helper.h>
|
||||
#include <drm/drm_encoder.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include <drm/drm_fourcc.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
#include <drm/drm_rect.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
@ -219,6 +220,16 @@ struct intel_encoder {
|
||||
* encoders have been disabled and suspended.
|
||||
*/
|
||||
void (*shutdown)(struct intel_encoder *encoder);
|
||||
/*
|
||||
* Enable/disable the clock to the port.
|
||||
*/
|
||||
void (*enable_clock)(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void (*disable_clock)(struct intel_encoder *encoder);
|
||||
/*
|
||||
* Returns whether the port clock is enabled or not.
|
||||
*/
|
||||
bool (*is_clock_enabled)(struct intel_encoder *encoder);
|
||||
enum hpd_pin hpd_pin;
|
||||
enum intel_display_power_domain power_domain;
|
||||
/* for communication with audio component; protected by av_mutex */
|
||||
@ -714,9 +725,9 @@ struct intel_pipe_wm {
|
||||
|
||||
struct skl_wm_level {
|
||||
u16 min_ddb_alloc;
|
||||
u16 plane_res_b;
|
||||
u8 plane_res_l;
|
||||
bool plane_en;
|
||||
u16 blocks;
|
||||
u8 lines;
|
||||
bool enable;
|
||||
bool ignore_lines;
|
||||
bool can_sagv;
|
||||
};
|
||||
@ -725,7 +736,10 @@ struct skl_plane_wm {
|
||||
struct skl_wm_level wm[8];
|
||||
struct skl_wm_level uv_wm[8];
|
||||
struct skl_wm_level trans_wm;
|
||||
struct skl_wm_level sagv_wm0;
|
||||
struct {
|
||||
struct skl_wm_level wm0;
|
||||
struct skl_wm_level trans_wm;
|
||||
} sagv;
|
||||
bool is_planar;
|
||||
};
|
||||
|
||||
@ -1159,6 +1173,13 @@ struct intel_crtc_state {
|
||||
u8 pipeline_full;
|
||||
u16 flipline, vmin, vmax;
|
||||
} vrr;
|
||||
|
||||
/* Stream Splitter for eDP MSO */
|
||||
struct {
|
||||
bool enable;
|
||||
u8 link_count;
|
||||
u8 pixel_overlap;
|
||||
} splitter;
|
||||
};
|
||||
|
||||
enum intel_pipe_crc_source {
|
||||
@ -1414,6 +1435,44 @@ struct intel_pps {
struct edp_power_seq pps_delays;
};

struct intel_psr {
/* Mutex for PSR state of the transcoder */
struct mutex lock;

#define I915_PSR_DEBUG_MODE_MASK 0x0f
#define I915_PSR_DEBUG_DEFAULT 0x00
#define I915_PSR_DEBUG_DISABLE 0x01
#define I915_PSR_DEBUG_ENABLE 0x02
#define I915_PSR_DEBUG_FORCE_PSR1 0x03
#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4
#define I915_PSR_DEBUG_IRQ 0x10

u32 debug;
bool sink_support;
bool source_support;
bool enabled;
enum pipe pipe;
enum transcoder transcoder;
bool active;
struct work_struct work;
unsigned int busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
bool colorimetry_support;
bool psr2_enabled;
bool psr2_sel_fetch_enabled;
u8 sink_sync_latency;
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
bool irq_aux_error;
u16 su_x_granularity;
bool dc3co_enabled;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
struct drm_dp_vsc_sdp vsc;
};

struct intel_dp {
i915_reg_t output_reg;
u32 DP;
@ -1448,6 +1507,8 @@ struct intel_dp {
int max_link_lane_count;
/* Max rate for the current link */
int max_link_rate;
int mso_link_count;
int mso_pixel_overlap;
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
@ -1516,6 +1577,8 @@ struct intel_dp {
bool hobl_active;

struct intel_dp_pcon_frl frl;

struct intel_psr psr;
};

enum lspcon_vendor {
@ -1752,6 +1815,17 @@ dp_to_i915(struct intel_dp *intel_dp)
return to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
}

#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
(intel_dp)->psr.source_support)

static inline bool intel_encoder_can_psr(struct intel_encoder *encoder)
{
if (!intel_encoder_is_dp(encoder))
return false;

return CAN_PSR(enc_to_intel_dp(encoder));
}

static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
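
The new struct intel_psr above is embedded in struct intel_dp, replacing the old per-device i915_psr in dev_priv, and CAN_PSR() now answers the question per connector (sink support and source support). A minimal sketch of how PSR state is reached after this change; the function name is illustrative only:

/* Illustrative only: per-transcoder PSR state now lives in intel_dp->psr,
 * protected by its own mutex, instead of dev_priv->psr.
 */
static bool example_psr_active(struct intel_dp *intel_dp)
{
        bool active;

        if (!CAN_PSR(intel_dp))
                return false;

        mutex_lock(&intel_dp->psr.lock);
        active = intel_dp->psr.enabled && intel_dp->psr.active;
        mutex_unlock(&intel_dp->psr.lock);

        return active;
}
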
@ -1893,4 +1967,39 @@ static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
|
||||
return dev_priv->fdi_pll_freq;
|
||||
}
|
||||
|
||||
static inline bool is_ccs_modifier(u64 modifier)
|
||||
{
|
||||
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
|
||||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
|
||||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
|
||||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
|
||||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
|
||||
}
|
||||
|
||||
static inline bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
|
||||
{
|
||||
if (!is_ccs_modifier(fb->modifier))
|
||||
return false;
|
||||
|
||||
return plane >= fb->format->num_planes / 2;
|
||||
}
|
||||
|
||||
static inline bool is_gen12_ccs_modifier(u64 modifier)
|
||||
{
|
||||
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
|
||||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
|
||||
modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
|
||||
}
|
||||
|
||||
static inline bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
|
||||
{
|
||||
return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
|
||||
}
|
||||
|
||||
static inline bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane)
|
||||
{
|
||||
return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC &&
|
||||
plane == 2;
|
||||
}
|
||||
|
||||
#endif /* __INTEL_DISPLAY_TYPES_H__ */
|
||||
|
@ -50,6 +50,7 @@
|
||||
#include "intel_dp_aux.h"
|
||||
#include "intel_dp_link_training.h"
|
||||
#include "intel_dp_mst.h"
|
||||
#include "intel_dpll.h"
|
||||
#include "intel_dpio_phy.h"
|
||||
#include "intel_fifo_underrun.h"
|
||||
#include "intel_hdcp.h"
|
||||
@ -788,10 +789,10 @@ intel_dp_mode_valid(struct drm_connector *connector,
|
||||
return MODE_H_ILLEGAL;
|
||||
|
||||
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
|
||||
if (mode->hdisplay > fixed_mode->hdisplay)
|
||||
if (mode->hdisplay != fixed_mode->hdisplay)
|
||||
return MODE_PANEL;
|
||||
|
||||
if (mode->vdisplay > fixed_mode->vdisplay)
|
||||
if (mode->vdisplay != fixed_mode->vdisplay)
|
||||
return MODE_PANEL;
|
||||
|
||||
target_clock = fixed_mode->clock;
|
||||
@ -1663,12 +1664,10 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
|
||||
const struct drm_connector_state *conn_state,
|
||||
struct drm_dp_vsc_sdp *vsc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
|
||||
vsc->sdp_type = DP_SDP_VSC;
|
||||
|
||||
if (dev_priv->psr.psr2_enabled) {
|
||||
if (dev_priv->psr.colorimetry_support &&
|
||||
if (intel_dp->psr.psr2_enabled) {
|
||||
if (intel_dp->psr.colorimetry_support &&
|
||||
intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
|
||||
/* [PSR2, +Colorimetry] */
|
||||
intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
|
||||
@ -1724,6 +1723,7 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
|
||||
{
|
||||
struct intel_connector *intel_connector = intel_dp->attached_connector;
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
int pixel_clock;
|
||||
|
||||
if (pipe_config->vrr.enable)
|
||||
return;
|
||||
@ -1742,10 +1742,18 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
|
||||
return;
|
||||
|
||||
pipe_config->has_drrs = true;
|
||||
intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
|
||||
intel_connector->panel.downclock_mode->clock,
|
||||
|
||||
pixel_clock = intel_connector->panel.downclock_mode->clock;
|
||||
if (pipe_config->splitter.enable)
|
||||
pixel_clock /= pipe_config->splitter.link_count;
|
||||
|
||||
intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
|
||||
pipe_config->port_clock, &pipe_config->dp_m2_n2,
|
||||
constant_n, pipe_config->fec_enable);
|
||||
|
||||
/* FIXME: abstract this better */
|
||||
if (pipe_config->splitter.enable)
|
||||
pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count;
|
||||
}
|
||||
|
||||
int
|
||||
@ -1820,6 +1828,26 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
output_bpp = intel_dp_output_bpp(pipe_config->output_format,
|
||||
pipe_config->pipe_bpp);
|
||||
|
||||
if (intel_dp->mso_link_count) {
|
||||
int n = intel_dp->mso_link_count;
|
||||
int overlap = intel_dp->mso_pixel_overlap;
|
||||
|
||||
pipe_config->splitter.enable = true;
|
||||
pipe_config->splitter.link_count = n;
|
||||
pipe_config->splitter.pixel_overlap = overlap;
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
|
||||
n, overlap);
|
||||
|
||||
adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
|
||||
adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
|
||||
adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
|
||||
adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
|
||||
adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
|
||||
adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
|
||||
adjusted_mode->crtc_clock /= n;
|
||||
}
|
||||
|
||||
intel_link_compute_m_n(output_bpp,
|
||||
pipe_config->lane_count,
|
||||
adjusted_mode->crtc_clock,
|
||||
@ -1827,6 +1855,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
&pipe_config->dp_m_n,
|
||||
constant_n, pipe_config->fec_enable);
|
||||
|
||||
/* FIXME: abstract this better */
|
||||
if (pipe_config->splitter.enable)
|
||||
pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count;
|
||||
|
||||
if (!HAS_DDI(dev_priv))
|
||||
intel_dp_set_clock(encoder, pipe_config);
|
||||
|
||||
@ -2359,7 +2391,7 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
|
||||
return false;
|
||||
}
|
||||
|
||||
if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) {
|
||||
if (CAN_PSR(intel_dp)) {
|
||||
drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
|
||||
crtc_state->uapi.mode_changed = true;
|
||||
return false;
|
||||
@ -2650,7 +2682,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
|
||||
if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
|
||||
int ret, mode;
|
||||
|
||||
drm_dbg(&dev_priv->drm, "Couldnt set FRL mode, continuing with TMDS mode\n");
|
||||
drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
|
||||
ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
|
||||
mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
|
||||
|
||||
@ -3517,6 +3549,64 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
}
}

static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
int n = intel_dp->mso_link_count;
int overlap = intel_dp->mso_pixel_overlap;

if (!mode || !n)
return;

mode->hdisplay = (mode->hdisplay - overlap) * n;
mode->hsync_start = (mode->hsync_start - overlap) * n;
mode->hsync_end = (mode->hsync_end - overlap) * n;
mode->htotal = (mode->htotal - overlap) * n;
mode->clock *= n;

drm_mode_set_name(mode);

drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] using generated MSO mode: ",
connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(mode);
}

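intel_edp_mso_mode_fixup() widens the per-segment mode from the panel's EDID into the full-panel mode, and intel_dp_compute_config() earlier in this diff derives the per-link transcoder timings again by dividing by the link count and re-adding the overlap. A worked example with hypothetical numbers (4 links, 8 pixel overlap, 1288 pixel wide segment):

/* Hypothetical 4x1 MSO panel, numbers chosen for illustration only:
 * the EDID describes one 1288 px wide segment with an 8 px overlap.
 */
static int mso_timing_example(void)
{
        int n = 4, overlap = 8;
        int segment_hdisplay = 1288;

        /* intel_edp_mso_mode_fixup(): per-segment -> full panel mode */
        int full_hdisplay = (segment_hdisplay - overlap) * n;   /* 5120 */

        /* intel_dp_compute_config(): full mode -> per-link timings */
        return full_hdisplay / n + overlap;                     /* 1288 again */
}
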
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 mso;

if (intel_dp->edp_dpcd[0] < DP_EDP_14)
return;

if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
drm_err(&i915->drm, "Failed to read MSO cap\n");
return;
}

/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
mso = 0;
}

if (mso) {
drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n",
mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso);
if (!HAS_MSO(i915)) {
drm_err(&i915->drm, "No source MSO support, disabling\n");
mso = 0;
}
}

intel_dp->mso_link_count = mso;
intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */
}
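
Note that after masking with DP_EDP_MSO_NUMBER_OF_LINKS_MASK, the check above (odd values or more links than DPCD lanes) leaves only 0 (plain SST), 2 or 4 as accepted link counts, matching the 2x1, 2x2 and 4x1 configurations named in the comment; the pixel overlap stays 0 until DisplayID v2.0 parsing is wired up, as the FIXME says.
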
static bool
|
||||
intel_edp_init_dpcd(struct intel_dp *intel_dp)
|
||||
{
|
||||
@ -3600,6 +3690,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
|
||||
*/
|
||||
intel_edp_init_source_oui(intel_dp, true);
|
||||
|
||||
intel_edp_mso_init(intel_dp);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -5548,19 +5640,18 @@ static int intel_dp_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct intel_connector *intel_connector = to_intel_connector(connector);
|
||||
struct edid *edid;
|
||||
int num_modes = 0;
|
||||
|
||||
edid = intel_connector->detect_edid;
|
||||
if (edid) {
|
||||
int ret = intel_connector_update_modes(connector, edid);
|
||||
num_modes = intel_connector_update_modes(connector, edid);
|
||||
|
||||
if (intel_vrr_is_capable(connector))
|
||||
drm_connector_set_vrr_capable_property(connector,
|
||||
true);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* if eDP has no EDID, fall back to fixed mode */
|
||||
/* Also add fixed mode, which may or may not be present in EDID */
|
||||
if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
|
||||
intel_connector->panel.fixed_mode) {
|
||||
struct drm_display_mode *mode;
|
||||
@ -5569,10 +5660,13 @@ static int intel_dp_get_modes(struct drm_connector *connector)
|
||||
intel_connector->panel.fixed_mode);
|
||||
if (mode) {
|
||||
drm_mode_probed_add(connector, mode);
|
||||
return 1;
|
||||
num_modes++;
|
||||
}
|
||||
}
|
||||
|
||||
if (num_modes)
|
||||
return num_modes;
|
||||
|
||||
if (!edid) {
|
||||
struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
|
||||
struct drm_display_mode *mode;
|
||||
@ -5582,11 +5676,11 @@ static int intel_dp_get_modes(struct drm_connector *connector)
|
||||
intel_dp->downstream_ports);
|
||||
if (mode) {
|
||||
drm_mode_probed_add(connector, mode);
|
||||
return 1;
|
||||
num_modes++;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
return num_modes;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -6459,6 +6553,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
|
||||
if (fixed_mode)
|
||||
downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
|
||||
|
||||
/* multiply the mode clock and horizontal timings for MSO */
|
||||
intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
|
||||
intel_edp_mso_mode_fixup(intel_connector, downclock_mode);
|
||||
|
||||
/* fallback to VBT if available for eDP */
|
||||
if (!fixed_mode)
|
||||
fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
|
||||
@ -6641,6 +6739,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
|
||||
intel_dp->frl.is_trained = false;
|
||||
intel_dp->frl.trained_rate_gbps = 0;
|
||||
|
||||
intel_psr_init(intel_dp);
|
||||
|
||||
return true;
|
||||
|
||||
fail:
|
||||
|
@ -698,30 +698,6 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool intel_dp_mst_get_qses_status(struct intel_digital_port *dig_port,
|
||||
struct intel_connector *connector)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
struct drm_dp_query_stream_enc_status_ack_reply reply;
|
||||
struct intel_dp *intel_dp = &dig_port->dp;
|
||||
int ret;
|
||||
|
||||
ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr,
|
||||
connector->port, &reply);
|
||||
if (ret) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"[%s:%d] failed QSES ret=%d\n",
|
||||
connector->base.name, connector->base.base.id, ret);
|
||||
return false;
|
||||
}
|
||||
|
||||
drm_dbg_kms(&i915->drm, "[%s:%d] QSES stream auth: %d stream enc: %d\n",
|
||||
connector->base.name, connector->base.base.id,
|
||||
reply.auth_completed, reply.encryption_enabled);
|
||||
|
||||
return reply.auth_completed && reply.encryption_enabled;
|
||||
}
|
||||
|
||||
static int
|
||||
intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
|
||||
bool enable)
|
||||
@ -757,11 +733,6 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* DP v2.0 I.3.3 ignore the stream signature L' in QSES reply msg reply.
|
||||
* I.3.5 MST source device may use a QSES msg to query downstream status
|
||||
* for a particular stream.
|
||||
*/
|
||||
static
|
||||
int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
|
||||
struct intel_connector *connector)
|
||||
@ -781,7 +752,7 @@ int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
|
||||
return ret;
|
||||
}
|
||||
|
||||
return intel_dp_mst_get_qses_status(dig_port, connector) ? 0 : -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
|
||||
|
@ -26,12 +26,13 @@
#include "intel_dp_link_training.h"

static void
intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
intel_dp_dump_link_status(struct drm_device *drm,
const u8 link_status[DP_LINK_STATUS_SIZE])
{

DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x",
link_status[0], link_status[1], link_status[2],
link_status[3], link_status[4], link_status[5]);
drm_dbg_kms(drm,
"ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
link_status[0], link_status[1], link_status[2],
link_status[3], link_status[4], link_status[5]);
}

static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
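
Besides adding the missing trailing newline, switching from DRM_DEBUG_KMS() to drm_dbg_kms() ties the message to a specific drm_device, so the dumped link status is prefixed with the device it belongs to, which matters on multi-GPU systems.
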
@ -642,7 +643,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
|
||||
/* Make sure clock is still ok */
|
||||
if (!drm_dp_clock_recovery_ok(link_status,
|
||||
crtc_state->lane_count)) {
|
||||
intel_dp_dump_link_status(link_status);
|
||||
intel_dp_dump_link_status(&i915->drm, link_status);
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Clock recovery check failed, cannot "
|
||||
"continue channel equalization\n");
|
||||
@ -669,7 +670,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
|
||||
|
||||
/* Try 5 times, else fail and try at lower BW */
|
||||
if (tries == 5) {
|
||||
intel_dp_dump_link_status(link_status);
|
||||
intel_dp_dump_link_status(&i915->drm, link_status);
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Channel equalization failed 5 times\n");
|
||||
}
|
||||
@ -731,7 +732,7 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp,
|
||||
|
||||
out:
|
||||
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
|
||||
"[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s",
|
||||
"[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n",
|
||||
intel_connector->base.base.id,
|
||||
intel_connector->base.name,
|
||||
ret ? "passed" : "failed",
|
||||
|
@ -39,6 +39,7 @@
|
||||
#include "intel_dp_mst.h"
|
||||
#include "intel_dpio_phy.h"
|
||||
#include "intel_hdcp.h"
|
||||
#include "skl_scaler.h"
|
||||
|
||||
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *crtc_state,
|
||||
@ -590,7 +591,7 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
|
||||
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
|
||||
struct intel_digital_port *dig_port = intel_mst->primary;
|
||||
|
||||
intel_ddi_get_config(&dig_port->base, pipe_config);
|
||||
dig_port->base.get_config(&dig_port->base, pipe_config);
|
||||
}
|
||||
|
||||
static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
|
||||
|
@ -3,11 +3,13 @@
|
||||
* Copyright © 2020 Intel Corporation
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include "intel_crtc.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_display.h"
|
||||
#include "intel_dpll.h"
|
||||
#include "intel_lvds.h"
|
||||
#include "intel_panel.h"
|
||||
#include "intel_sideband.h"
|
||||
|
||||
struct intel_limit {
|
||||
struct {
|
||||
@ -1361,3 +1363,510 @@ intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
|
||||
else
|
||||
dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
|
||||
}
|
||||
|
||||
static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (IS_I830(dev_priv))
|
||||
return false;
|
||||
|
||||
return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
|
||||
}
|
||||
|
||||
void i9xx_enable_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
i915_reg_t reg = DPLL(crtc->pipe);
|
||||
u32 dpll = crtc_state->dpll_hw_state.dpll;
|
||||
int i;
|
||||
|
||||
assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
|
||||
|
||||
/* PLL is protected by panel, make sure we can write it */
|
||||
if (i9xx_has_pps(dev_priv))
|
||||
assert_panel_unlocked(dev_priv, crtc->pipe);
|
||||
|
||||
/*
|
||||
* Apparently we need to have VGA mode enabled prior to changing
|
||||
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
|
||||
* dividers, even though the register value does change.
|
||||
*/
|
||||
intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
|
||||
intel_de_write(dev_priv, reg, dpll);
|
||||
|
||||
/* Wait for the clocks to stabilize. */
|
||||
intel_de_posting_read(dev_priv, reg);
|
||||
udelay(150);
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 4) {
|
||||
intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
|
||||
crtc_state->dpll_hw_state.dpll_md);
|
||||
} else {
|
||||
/* The pixel multiplier can only be updated once the
|
||||
* DPLL is enabled and the clocks are stable.
|
||||
*
|
||||
* So write it again.
|
||||
*/
|
||||
intel_de_write(dev_priv, reg, dpll);
|
||||
}
|
||||
|
||||
/* We do this three times for luck */
|
||||
for (i = 0; i < 3; i++) {
|
||||
intel_de_write(dev_priv, reg, dpll);
|
||||
intel_de_posting_read(dev_priv, reg);
|
||||
udelay(150); /* wait for warmup */
|
||||
}
|
||||
}
|
||||
|
||||
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
u32 reg_val;
|
||||
|
||||
/*
|
||||
* PLLB opamp always calibrates to max value of 0x3f, force enable it
|
||||
* and set it to a reasonable value instead.
|
||||
*/
|
||||
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
|
||||
reg_val &= 0xffffff00;
|
||||
reg_val |= 0x00000030;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
|
||||
|
||||
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
|
||||
reg_val &= 0x00ffffff;
|
||||
reg_val |= 0x8c000000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
|
||||
|
||||
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
|
||||
reg_val &= 0xffffff00;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
|
||||
|
||||
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
|
||||
reg_val &= 0x00ffffff;
|
||||
reg_val |= 0xb0000000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
|
||||
}
|
||||
|
||||
static void _vlv_enable_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
|
||||
intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
|
||||
intel_de_posting_read(dev_priv, DPLL(pipe));
|
||||
udelay(150);
|
||||
|
||||
if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
|
||||
drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
|
||||
}
|
||||
|
||||
void vlv_enable_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
|
||||
assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
|
||||
|
||||
/* PLL is protected by panel, make sure we can write it */
|
||||
assert_panel_unlocked(dev_priv, pipe);
|
||||
|
||||
if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
|
||||
_vlv_enable_pll(crtc, pipe_config);
|
||||
|
||||
intel_de_write(dev_priv, DPLL_MD(pipe),
|
||||
pipe_config->dpll_hw_state.dpll_md);
|
||||
intel_de_posting_read(dev_priv, DPLL_MD(pipe));
|
||||
}
|
||||
|
||||
|
||||
static void _chv_enable_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_channel port = vlv_pipe_to_channel(pipe);
|
||||
u32 tmp;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
/* Enable back the 10bit clock to display controller */
|
||||
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
|
||||
tmp |= DPIO_DCLKP_EN;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
|
||||
/*
|
||||
* Need to wait > 100ns between dclkp clock enable bit and PLL enable.
|
||||
*/
|
||||
udelay(1);
|
||||
|
||||
/* Enable PLL */
|
||||
intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
|
||||
|
||||
/* Check PLL is locked */
|
||||
if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
|
||||
drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
|
||||
}
|
||||
|
||||
void chv_enable_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
|
||||
assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
|
||||
|
||||
/* PLL is protected by panel, make sure we can write it */
|
||||
assert_panel_unlocked(dev_priv, pipe);
|
||||
|
||||
if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
|
||||
_chv_enable_pll(crtc, pipe_config);
|
||||
|
||||
if (pipe != PIPE_A) {
|
||||
/*
|
||||
* WaPixelRepeatModeFixForC0:chv
|
||||
*
|
||||
* DPLLCMD is AWOL. Use chicken bits to propagate
|
||||
* the value from DPLLBMD to either pipe B or C.
|
||||
*/
|
||||
intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
|
||||
intel_de_write(dev_priv, DPLL_MD(PIPE_B),
|
||||
pipe_config->dpll_hw_state.dpll_md);
|
||||
intel_de_write(dev_priv, CBR4_VLV, 0);
|
||||
dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
|
||||
|
||||
/*
|
||||
* DPLLB VGA mode also seems to cause problems.
|
||||
* We should always have it disabled.
|
||||
*/
|
||||
drm_WARN_ON(&dev_priv->drm,
|
||||
(intel_de_read(dev_priv, DPLL(PIPE_B)) &
|
||||
DPLL_VGA_MODE_DIS) == 0);
|
||||
} else {
|
||||
intel_de_write(dev_priv, DPLL_MD(pipe),
|
||||
pipe_config->dpll_hw_state.dpll_md);
|
||||
intel_de_posting_read(dev_priv, DPLL_MD(pipe));
|
||||
}
|
||||
}
|
||||
|
||||
void vlv_prepare_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
u32 mdiv;
|
||||
u32 bestn, bestm1, bestm2, bestp1, bestp2;
|
||||
u32 coreclk, reg_val;
|
||||
|
||||
/* Enable Refclk */
|
||||
intel_de_write(dev_priv, DPLL(pipe),
|
||||
pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
|
||||
|
||||
/* No need to actually set up the DPLL with DSI */
|
||||
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
|
||||
return;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
bestn = pipe_config->dpll.n;
|
||||
bestm1 = pipe_config->dpll.m1;
|
||||
bestm2 = pipe_config->dpll.m2;
|
||||
bestp1 = pipe_config->dpll.p1;
|
||||
bestp2 = pipe_config->dpll.p2;
|
||||
|
||||
/* See eDP HDMI DPIO driver vbios notes doc */
|
||||
|
||||
/* PLL B needs special handling */
|
||||
if (pipe == PIPE_B)
|
||||
vlv_pllb_recal_opamp(dev_priv, pipe);
|
||||
|
||||
/* Set up Tx target for periodic Rcomp update */
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
|
||||
|
||||
/* Disable target IRef on PLL */
|
||||
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
|
||||
reg_val &= 0x00ffffff;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
|
||||
|
||||
/* Disable fast lock */
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
|
||||
|
||||
/* Set idtafcrecal before PLL is enabled */
|
||||
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
|
||||
mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
|
||||
mdiv |= ((bestn << DPIO_N_SHIFT));
|
||||
mdiv |= (1 << DPIO_K_SHIFT);
|
||||
|
||||
/*
|
||||
* Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
|
||||
* but we don't support that).
|
||||
* Note: don't use the DAC post divider as it seems unstable.
|
||||
*/
|
||||
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
|
||||
|
||||
mdiv |= DPIO_ENABLE_CALIBRATION;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
|
||||
|
||||
/* Set HBR and RBR LPF coefficients */
|
||||
if (pipe_config->port_clock == 162000 ||
|
||||
intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
|
||||
intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
|
||||
0x009f0003);
|
||||
else
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
|
||||
0x00d0000f);
|
||||
|
||||
if (intel_crtc_has_dp_encoder(pipe_config)) {
|
||||
/* Use SSC source */
|
||||
if (pipe == PIPE_A)
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
|
||||
0x0df40000);
|
||||
else
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
|
||||
0x0df70000);
|
||||
} else { /* HDMI or VGA */
|
||||
/* Use bend source */
|
||||
if (pipe == PIPE_A)
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
|
||||
0x0df70000);
|
||||
else
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
|
||||
0x0df40000);
|
||||
}
|
||||
|
||||
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
|
||||
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
|
||||
if (intel_crtc_has_dp_encoder(pipe_config))
|
||||
coreclk |= 0x01000000;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
|
||||
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
}
|
||||
|
||||
void chv_prepare_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
enum dpio_channel port = vlv_pipe_to_channel(pipe);
|
||||
u32 loopfilter, tribuf_calcntr;
|
||||
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
|
||||
u32 dpio_val;
|
||||
int vco;
|
||||
|
||||
/* Enable Refclk and SSC */
|
||||
intel_de_write(dev_priv, DPLL(pipe),
|
||||
pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
|
||||
|
||||
/* No need to actually set up the DPLL with DSI */
|
||||
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
|
||||
return;
|
||||
|
||||
bestn = pipe_config->dpll.n;
|
||||
bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
|
||||
bestm1 = pipe_config->dpll.m1;
|
||||
bestm2 = pipe_config->dpll.m2 >> 22;
|
||||
bestp1 = pipe_config->dpll.p1;
|
||||
bestp2 = pipe_config->dpll.p2;
|
||||
vco = pipe_config->dpll.vco;
|
||||
dpio_val = 0;
|
||||
loopfilter = 0;
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
/* p1 and p2 divider */
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
|
||||
5 << DPIO_CHV_S1_DIV_SHIFT |
|
||||
bestp1 << DPIO_CHV_P1_DIV_SHIFT |
|
||||
bestp2 << DPIO_CHV_P2_DIV_SHIFT |
|
||||
1 << DPIO_CHV_K_DIV_SHIFT);
|
||||
|
||||
/* Feedback post-divider - m2 */
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
|
||||
|
||||
/* Feedback refclk divider - n and m1 */
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
|
||||
DPIO_CHV_M1_DIV_BY_2 |
|
||||
1 << DPIO_CHV_N_DIV_SHIFT);
|
||||
|
||||
/* M2 fraction division */
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
|
||||
|
||||
/* M2 fraction division enable */
|
||||
dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
|
||||
dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
|
||||
dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
|
||||
if (bestm2_frac)
|
||||
dpio_val |= DPIO_CHV_FRAC_DIV_EN;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
|
||||
|
||||
/* Program digital lock detect threshold */
|
||||
dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
|
||||
dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
|
||||
DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
|
||||
dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
|
||||
if (!bestm2_frac)
|
||||
dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
|
||||
|
||||
/* Loop filter */
|
||||
if (vco == 5400000) {
|
||||
loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
|
||||
loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
|
||||
loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
|
||||
tribuf_calcntr = 0x9;
|
||||
} else if (vco <= 6200000) {
|
||||
loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
|
||||
loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
|
||||
loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
|
||||
tribuf_calcntr = 0x9;
|
||||
} else if (vco <= 6480000) {
|
||||
loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
|
||||
loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
|
||||
loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
|
||||
tribuf_calcntr = 0x8;
|
||||
} else {
|
||||
/* Not supported. Apply the same limits as in the max case */
|
||||
loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
|
||||
loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
|
||||
loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
|
||||
tribuf_calcntr = 0;
|
||||
}
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
|
||||
|
||||
dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
|
||||
dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
|
||||
dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
|
||||
|
||||
/* AFC Recal */
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
|
||||
vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
|
||||
DPIO_AFC_RECAL);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
}
|
||||
|
||||
/**
|
||||
* vlv_force_pll_on - forcibly enable just the PLL
|
||||
* @dev_priv: i915 private structure
|
||||
* @pipe: pipe PLL to enable
|
||||
* @dpll: PLL configuration
|
||||
*
|
||||
* Enable the PLL for @pipe using the supplied @dpll config. To be used
|
||||
* in cases where we need the PLL enabled even when @pipe is not going to
|
||||
* be enabled.
|
||||
*/
|
||||
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
const struct dpll *dpll)
|
||||
{
|
||||
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
|
||||
struct intel_crtc_state *pipe_config;
|
||||
|
||||
pipe_config = intel_crtc_state_alloc(crtc);
|
||||
if (!pipe_config)
|
||||
return -ENOMEM;
|
||||
|
||||
pipe_config->cpu_transcoder = (enum transcoder)pipe;
|
||||
pipe_config->pixel_multiplier = 1;
|
||||
pipe_config->dpll = *dpll;
|
||||
|
||||
if (IS_CHERRYVIEW(dev_priv)) {
|
||||
chv_compute_dpll(crtc, pipe_config);
|
||||
chv_prepare_pll(crtc, pipe_config);
|
||||
chv_enable_pll(crtc, pipe_config);
|
||||
} else {
|
||||
vlv_compute_dpll(crtc, pipe_config);
|
||||
vlv_prepare_pll(crtc, pipe_config);
|
||||
vlv_enable_pll(crtc, pipe_config);
|
||||
}
|
||||
|
||||
kfree(pipe_config);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
/* Make sure the pipe isn't still relying on us */
|
||||
assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
|
||||
|
||||
val = DPLL_INTEGRATED_REF_CLK_VLV |
|
||||
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
|
||||
if (pipe != PIPE_A)
|
||||
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
|
||||
|
||||
intel_de_write(dev_priv, DPLL(pipe), val);
|
||||
intel_de_posting_read(dev_priv, DPLL(pipe));
|
||||
}
|
||||
|
||||
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
|
||||
{
|
||||
enum dpio_channel port = vlv_pipe_to_channel(pipe);
|
||||
u32 val;
|
||||
|
||||
/* Make sure the pipe isn't still relying on us */
|
||||
assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
|
||||
|
||||
val = DPLL_SSC_REF_CLK_CHV |
|
||||
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
|
||||
if (pipe != PIPE_A)
|
||||
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
|
||||
|
||||
intel_de_write(dev_priv, DPLL(pipe), val);
|
||||
intel_de_posting_read(dev_priv, DPLL(pipe));
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
||||
/* Disable 10bit clock to display controller */
|
||||
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
|
||||
val &= ~DPIO_DCLKP_EN;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
|
||||
|
||||
vlv_dpio_put(dev_priv);
|
||||
}
|
||||
|
||||
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
|
||||
/* Don't disable pipe or pipe PLLs if needed */
|
||||
if (IS_I830(dev_priv))
|
||||
return;
|
||||
|
||||
/* Make sure the pipe isn't still relying on us */
|
||||
assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
|
||||
|
||||
intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
|
||||
intel_de_posting_read(dev_priv, DPLL(pipe));
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vlv_force_pll_off - forcibly disable just the PLL
|
||||
* @dev_priv: i915 private structure
|
||||
* @pipe: pipe PLL to disable
|
||||
*
|
||||
* Disable the PLL for @pipe. To be used in cases where we need
|
||||
* the PLL disabled even when @pipe is not going to be enabled.
|
||||
*/
|
||||
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
|
||||
{
|
||||
if (IS_CHERRYVIEW(dev_priv))
|
||||
chv_disable_pll(dev_priv, pipe);
|
||||
else
|
||||
vlv_disable_pll(dev_priv, pipe);
|
||||
}
|
||||
|
@ -10,6 +10,7 @@ struct dpll;
|
||||
struct drm_i915_private;
|
||||
struct intel_crtc;
|
||||
struct intel_crtc_state;
|
||||
enum pipe;
|
||||
|
||||
void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
|
||||
int vlv_calc_dpll_params(int refclk, struct dpll *clock);
|
||||
@ -20,4 +21,21 @@ void vlv_compute_dpll(struct intel_crtc *crtc,
|
||||
void chv_compute_dpll(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *pipe_config);
|
||||
|
||||
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
const struct dpll *dpll);
|
||||
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
|
||||
void i9xx_enable_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void vlv_enable_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *pipe_config);
|
||||
void chv_enable_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *pipe_config);
|
||||
void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
|
||||
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
|
||||
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state);
|
||||
void vlv_prepare_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *pipe_config);
|
||||
void chv_prepare_pll(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *pipe_config);
|
||||
|
||||
#endif
|
||||
|
@ -176,7 +176,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->dpll.lock);
|
||||
drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
|
||||
drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask);
|
||||
if (!pll->active_mask) {
|
||||
drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
|
||||
drm_WARN_ON(&dev_priv->drm, pll->on);
|
||||
@ -198,7 +198,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
|
||||
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
|
||||
unsigned int pipe_mask = BIT(crtc->pipe);
|
||||
unsigned int old_mask;
|
||||
|
||||
if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
|
||||
@ -207,16 +207,16 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
|
||||
mutex_lock(&dev_priv->dpll.lock);
|
||||
old_mask = pll->active_mask;
|
||||
|
||||
if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
|
||||
drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
|
||||
if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
|
||||
drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
|
||||
goto out;
|
||||
|
||||
pll->active_mask |= crtc_mask;
|
||||
pll->active_mask |= pipe_mask;
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"enable %s (active %x, on? %d) for crtc %d\n",
|
||||
"enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
|
||||
pll->info->name, pll->active_mask, pll->on,
|
||||
crtc->base.base.id);
|
||||
crtc->base.base.id, crtc->base.name);
|
||||
|
||||
if (old_mask) {
|
||||
drm_WARN_ON(&dev_priv->drm, !pll->on);
|
||||
@ -244,7 +244,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
|
||||
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
|
||||
unsigned int pipe_mask = BIT(crtc->pipe);
|
||||
|
||||
/* PCH only available on ILK+ */
|
||||
if (INTEL_GEN(dev_priv) < 5)
|
||||
@ -254,18 +254,20 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->dpll.lock);
|
||||
if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
|
||||
if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
|
||||
"%s not used by [CRTC:%d:%s]\n", pll->info->name,
|
||||
crtc->base.base.id, crtc->base.name))
|
||||
goto out;
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"disable %s (active %x, on? %d) for crtc %d\n",
|
||||
"disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
|
||||
pll->info->name, pll->active_mask, pll->on,
|
||||
crtc->base.base.id);
|
||||
crtc->base.base.id, crtc->base.name);
|
||||
|
||||
assert_shared_dpll_enabled(dev_priv, pll);
|
||||
drm_WARN_ON(&dev_priv->drm, !pll->on);
|
||||
|
||||
pll->active_mask &= ~crtc_mask;
|
||||
pll->active_mask &= ~pipe_mask;
|
||||
if (pll->active_mask)
|
||||
goto out;
|
||||
|
||||
@ -296,7 +298,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
|
||||
pll = &dev_priv->dpll.shared_dplls[i];
|
||||
|
||||
/* Only want to check enabled timings first */
|
||||
if (shared_dpll[i].crtc_mask == 0) {
|
||||
if (shared_dpll[i].pipe_mask == 0) {
|
||||
if (!unused_pll)
|
||||
unused_pll = pll;
|
||||
continue;
|
||||
@ -306,10 +308,10 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
|
||||
&shared_dpll[i].hw_state,
|
||||
sizeof(*pll_state)) == 0) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
|
||||
"[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
|
||||
crtc->base.base.id, crtc->base.name,
|
||||
pll->info->name,
|
||||
shared_dpll[i].crtc_mask,
|
||||
shared_dpll[i].pipe_mask,
|
||||
pll->active_mask);
|
||||
return pll;
|
||||
}
|
||||
@ -338,13 +340,13 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
|
||||
|
||||
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
|
||||
|
||||
if (shared_dpll[id].crtc_mask == 0)
|
||||
if (shared_dpll[id].pipe_mask == 0)
|
||||
shared_dpll[id].hw_state = *pll_state;
|
||||
|
||||
drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
|
||||
pipe_name(crtc->pipe));
|
||||
|
||||
shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
|
||||
shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
|
||||
}
|
||||
|
||||
static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
|
||||
@ -354,7 +356,7 @@ static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
|
||||
struct intel_shared_dpll_state *shared_dpll;
|
||||
|
||||
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
|
||||
shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
|
||||
shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
|
||||
}
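
The hunks above (and the readout code further down) all make the same substitution; roughly, the bookkeeping moves from a CRTC-index based mask to a hardware-pipe based one:

	/* before: mask keyed on the DRM CRTC index */
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
	pll->active_mask |= crtc_mask;

	/* after: mask keyed on the hardware pipe */
	unsigned int pipe_mask = BIT(crtc->pipe);
	pll->active_mask |= pipe_mask;

The two numberings usually line up on i915, but DPLL state is fundamentally per-pipe, and the struct change later in this patch (unsigned crtc_mask becoming u8 pipe_mask) makes that explicit.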
|
||||
|
||||
static void intel_put_dpll(struct intel_atomic_state *state,
|
||||
@ -3559,7 +3561,13 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
|
||||
|
||||
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
|
||||
|
||||
if (IS_DG1(dev_priv)) {
|
||||
if (IS_ALDERLAKE_S(dev_priv)) {
|
||||
dpll_mask =
|
||||
BIT(DPLL_ID_DG1_DPLL3) |
|
||||
BIT(DPLL_ID_DG1_DPLL2) |
|
||||
BIT(DPLL_ID_ICL_DPLL1) |
|
||||
BIT(DPLL_ID_ICL_DPLL0);
|
||||
} else if (IS_DG1(dev_priv)) {
|
||||
if (port == PORT_D || port == PORT_E) {
|
||||
dpll_mask =
|
||||
BIT(DPLL_ID_DG1_DPLL2) |
|
||||
@ -3865,7 +3873,10 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
|
||||
if (!(val & PLL_ENABLE))
|
||||
goto out;
|
||||
|
||||
if (IS_DG1(dev_priv)) {
|
||||
if (IS_ALDERLAKE_S(dev_priv)) {
|
||||
hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
|
||||
hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
|
||||
} else if (IS_DG1(dev_priv)) {
|
||||
hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
|
||||
hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
|
||||
} else if (IS_ROCKETLAKE(dev_priv)) {
|
||||
@ -3921,7 +3932,10 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
|
||||
const enum intel_dpll_id id = pll->info->id;
|
||||
i915_reg_t cfgcr0_reg, cfgcr1_reg;
|
||||
|
||||
if (IS_DG1(dev_priv)) {
|
||||
if (IS_ALDERLAKE_S(dev_priv)) {
|
||||
cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
|
||||
cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
|
||||
} else if (IS_DG1(dev_priv)) {
|
||||
cfgcr0_reg = DG1_DPLL_CFGCR0(id);
|
||||
cfgcr1_reg = DG1_DPLL_CFGCR1(id);
|
||||
} else if (IS_ROCKETLAKE(dev_priv)) {
|
||||
@ -4384,6 +4398,22 @@ static const struct intel_dpll_mgr dg1_pll_mgr = {
|
||||
.dump_hw_state = icl_dump_hw_state,
|
||||
};
|
||||
|
||||
static const struct dpll_info adls_plls[] = {
|
||||
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
|
||||
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
|
||||
{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
|
||||
{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
|
||||
{ },
|
||||
};
|
||||
|
||||
static const struct intel_dpll_mgr adls_pll_mgr = {
|
||||
.dpll_info = adls_plls,
|
||||
.get_dplls = icl_get_dplls,
|
||||
.put_dplls = icl_put_dplls,
|
||||
.update_ref_clks = icl_update_dpll_ref_clks,
|
||||
.dump_hw_state = icl_dump_hw_state,
|
||||
};
|
||||
|
||||
/**
|
||||
* intel_shared_dpll_init - Initialize shared DPLLs
|
||||
* @dev: drm device
|
||||
@ -4397,7 +4427,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
|
||||
const struct dpll_info *dpll_info;
|
||||
int i;
|
||||
|
||||
if (IS_DG1(dev_priv))
|
||||
if (IS_ALDERLAKE_S(dev_priv))
|
||||
dpll_mgr = &adls_pll_mgr;
|
||||
else if (IS_DG1(dev_priv))
|
||||
dpll_mgr = &dg1_pll_mgr;
|
||||
else if (IS_ROCKETLAKE(dev_priv))
|
||||
dpll_mgr = &rkl_pll_mgr;
|
||||
@ -4567,28 +4599,31 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
|
||||
POWER_DOMAIN_DPLL_DC_OFF);
|
||||
}
|
||||
|
||||
pll->state.crtc_mask = 0;
|
||||
pll->state.pipe_mask = 0;
|
||||
for_each_intel_crtc(&i915->drm, crtc) {
|
||||
struct intel_crtc_state *crtc_state =
|
||||
to_intel_crtc_state(crtc->base.state);
|
||||
|
||||
if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
|
||||
pll->state.crtc_mask |= 1 << crtc->pipe;
|
||||
pll->state.pipe_mask |= BIT(crtc->pipe);
|
||||
}
|
||||
pll->active_mask = pll->state.crtc_mask;
|
||||
pll->active_mask = pll->state.pipe_mask;
|
||||
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"%s hw state readout: crtc_mask 0x%08x, on %i\n",
|
||||
pll->info->name, pll->state.crtc_mask, pll->on);
|
||||
"%s hw state readout: pipe_mask 0x%x, on %i\n",
|
||||
pll->info->name, pll->state.pipe_mask, pll->on);
|
||||
}
|
||||
|
||||
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
|
||||
{
|
||||
if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
|
||||
i915->dpll.mgr->update_ref_clks(i915);
|
||||
}
|
||||
|
||||
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
|
||||
i915->dpll.mgr->update_ref_clks(i915);
|
||||
|
||||
for (i = 0; i < i915->dpll.num_shared_dpll; i++)
|
||||
readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
|
||||
}
|
||||
|
@ -241,9 +241,9 @@ struct intel_dpll_hw_state {
|
||||
*/
|
||||
struct intel_shared_dpll_state {
|
||||
/**
|
||||
* @crtc_mask: mask of CRTC using this DPLL, active or not
|
||||
* @pipe_mask: mask of pipes using this DPLL, active or not
|
||||
*/
|
||||
unsigned crtc_mask;
|
||||
u8 pipe_mask;
|
||||
|
||||
/**
|
||||
* @hw_state: hardware configuration for the DPLL stored in
|
||||
@ -351,9 +351,9 @@ struct intel_shared_dpll {
|
||||
struct intel_shared_dpll_state state;
|
||||
|
||||
/**
|
||||
* @active_mask: mask of active CRTCs (i.e. DPMS on) using this DPLL
|
||||
* @active_mask: mask of active pipes (i.e. DPMS on) using this DPLL
|
||||
*/
|
||||
unsigned active_mask;
|
||||
u8 active_mask;
|
||||
|
||||
/**
|
||||
* @on: is the PLL actually active? Disabled during modeset
|
||||
@ -410,6 +410,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
|
||||
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
|
||||
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
|
||||
void intel_shared_dpll_init(struct drm_device *dev);
|
||||
void intel_dpll_update_ref_clks(struct drm_i915_private *dev_priv);
|
||||
void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
|
||||
void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
|
||||
|
||||
|
@ -425,7 +425,7 @@ static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
|
||||
const u16 slave_addr)
|
||||
{
|
||||
struct drm_device *drm_dev = intel_dsi->base.base.dev;
|
||||
struct device *dev = &drm_dev->pdev->dev;
|
||||
struct device *dev = drm_dev->dev;
|
||||
struct acpi_device *acpi_dev;
|
||||
struct list_head resource_list;
|
||||
struct i2c_adapter_lookup lookup;
|
||||
|
@ -167,7 +167,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
|
||||
struct intel_framebuffer *intel_fb = ifbdev->fb;
|
||||
struct drm_device *dev = helper->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
const struct i915_ggtt_view view = {
|
||||
.type = I915_GGTT_VIEW_NORMAL,
|
||||
|
@ -3,6 +3,8 @@
|
||||
* Copyright © 2020 Intel Corporation
|
||||
*/
|
||||
#include "intel_atomic.h"
|
||||
#include "intel_ddi.h"
|
||||
#include "intel_ddi_buf_trans.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_fdi.h"
|
||||
|
||||
@ -550,6 +552,142 @@ train_done:
|
||||
drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
|
||||
}
|
||||
|
||||
/* Starting with Haswell, different DDI ports can work in FDI mode for
|
||||
* connection to the PCH-located connectors. For this, it is necessary to train
|
||||
* both the DDI port and PCH receiver for the desired DDI buffer settings.
|
||||
*
|
||||
* The recommended port to work in FDI mode is DDI E, which we use here. Also,
|
||||
* please note that when FDI mode is active on DDI E, it shares 2 lines with
|
||||
* DDI A (which is used for eDP)
|
||||
*/
|
||||
void hsw_fdi_link_train(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
u32 temp, i, rx_ctl_val;
|
||||
int n_entries;
|
||||
|
||||
intel_ddi_get_buf_trans_fdi(dev_priv, &n_entries);
|
||||
|
||||
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
|
||||
|
||||
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
|
||||
* mode set "sequence for CRT port" document:
|
||||
* - TP1 to TP2 time with the default value
|
||||
* - FDI delay to 90h
|
||||
*
|
||||
* WaFDIAutoLinkSetTimingOverrride:hsw
|
||||
*/
|
||||
intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
|
||||
FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
|
||||
|
||||
/* Enable the PCH Receiver FDI PLL */
|
||||
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
|
||||
FDI_RX_PLL_ENABLE |
|
||||
FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
|
||||
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
|
||||
intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
|
||||
udelay(220);
|
||||
|
||||
/* Switch from Rawclk to PCDclk */
|
||||
rx_ctl_val |= FDI_PCDCLK;
|
||||
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
|
||||
|
||||
/* Configure Port Clock Select */
|
||||
drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
|
||||
intel_ddi_enable_clock(encoder, crtc_state);
|
||||
|
||||
/* Start the training iterating through available voltages and emphasis,
|
||||
* testing each value twice. */
|
||||
for (i = 0; i < n_entries * 2; i++) {
|
||||
/* Configure DP_TP_CTL with auto-training */
|
||||
intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
|
||||
DP_TP_CTL_FDI_AUTOTRAIN |
|
||||
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
|
||||
DP_TP_CTL_LINK_TRAIN_PAT1 |
|
||||
DP_TP_CTL_ENABLE);
|
||||
|
||||
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
|
||||
* DDI E does not support port reversal, the functionality is
|
||||
* achieved on the PCH side in FDI_RX_CTL, so no need to set the
|
||||
* port reversal bit */
|
||||
intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
|
||||
DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
|
||||
intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
|
||||
|
||||
udelay(600);
|
||||
|
||||
/* Program PCH FDI Receiver TU */
|
||||
intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
|
||||
|
||||
/* Enable PCH FDI Receiver with auto-training */
|
||||
rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
|
||||
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
|
||||
intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
|
||||
|
||||
/* Wait for FDI receiver lane calibration */
|
||||
udelay(30);
|
||||
|
||||
/* Unset FDI_RX_MISC pwrdn lanes */
|
||||
temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
|
||||
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
|
||||
intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
|
||||
intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
|
||||
|
||||
/* Wait for FDI auto training time */
|
||||
udelay(5);
|
||||
|
||||
temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
|
||||
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"FDI link training done on step %d\n", i);
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Leave things enabled even if we failed to train FDI.
|
||||
* Results in less fireworks from the state checker.
|
||||
*/
|
||||
if (i == n_entries * 2 - 1) {
|
||||
drm_err(&dev_priv->drm, "FDI link training failed!\n");
|
||||
break;
|
||||
}
|
||||
|
||||
rx_ctl_val &= ~FDI_RX_ENABLE;
|
||||
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
|
||||
intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
|
||||
|
||||
temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
|
||||
temp &= ~DDI_BUF_CTL_ENABLE;
|
||||
intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
|
||||
intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
|
||||
|
||||
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
|
||||
temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
|
||||
temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
|
||||
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
|
||||
intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
|
||||
intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
|
||||
|
||||
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
|
||||
|
||||
/* Reset FDI_RX_MISC pwrdn lanes */
|
||||
temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
|
||||
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
|
||||
temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
|
||||
intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
|
||||
intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
|
||||
}
|
||||
|
||||
/* Enable normal pixel sending for FDI */
|
||||
intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
|
||||
DP_TP_CTL_FDI_AUTOTRAIN |
|
||||
DP_TP_CTL_LINK_TRAIN_NORMAL |
|
||||
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
|
||||
DP_TP_CTL_ENABLE);
|
||||
}
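
One detail of the retry loop above that is easy to miss: DDI_BUF_TRANS_SELECT(i / 2) advances through the n_entries buffer translation entries at half the loop rate, so every voltage-swing/emphasis setting is attempted twice before training is declared failed:

	/* illustrative mapping of loop index to buf trans entry */
	i = 0, 1                          ->  DDI_BUF_TRANS_SELECT(0)
	i = 2, 3                          ->  DDI_BUF_TRANS_SELECT(1)
	...
	i = 2 * n_entries - 2, - 1        ->  DDI_BUF_TRANS_SELECT(n_entries - 1)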
|
||||
|
||||
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
|
@ -9,6 +9,7 @@
|
||||
struct drm_i915_private;
|
||||
struct intel_crtc;
|
||||
struct intel_crtc_state;
|
||||
struct intel_encoder;
|
||||
|
||||
#define I915_DISPLAY_CONFIG_RETRY 1
|
||||
int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
|
||||
@ -18,5 +19,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc);
|
||||
void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc);
|
||||
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
|
||||
void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
|
||||
void hsw_fdi_link_train(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
|
||||
#endif
|
||||
|
@ -224,6 +224,8 @@ static void frontbuffer_release(struct kref *ref)
|
||||
struct drm_i915_gem_object *obj = front->obj;
|
||||
struct i915_vma *vma;
|
||||
|
||||
drm_WARN_ON(obj->base.dev, atomic_read(&front->bits));
|
||||
|
||||
spin_lock(&obj->vma.lock);
|
||||
for_each_ggtt_vma(vma, obj) {
|
||||
i915_vma_clear_scanout(vma);
|
||||
|
@ -840,7 +840,7 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
|
||||
*/
|
||||
int intel_gmbus_setup(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
struct intel_gmbus *bus;
|
||||
unsigned int pin;
|
||||
int ret;
|
||||
|
@ -2233,6 +2233,16 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
static int intel_hdmi_port_clock(int clock, int bpc)
|
||||
{
|
||||
/*
|
||||
* Need to adjust the port link by:
|
||||
* 1.5x for 12bpc
|
||||
* 1.25x for 10bpc
|
||||
*/
|
||||
return clock * bpc / 8;
|
||||
}
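
A quick arithmetic check of the helper just added (the pixel clock value here is illustrative, not from the patch): deep color raises the port clock that is then fed to hdmi_port_clock_valid().

	intel_hdmi_port_clock(297000, 8);	/* = 297000          */
	intel_hdmi_port_clock(297000, 10);	/* = 371250  (1.25x) */
	intel_hdmi_port_clock(297000, 12);	/* = 445500  (1.5x)  */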
|
||||
|
||||
static enum drm_mode_status
|
||||
intel_hdmi_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
@ -2264,17 +2274,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
|
||||
clock /= 2;
|
||||
|
||||
/* check if we can do 8bpc */
|
||||
status = hdmi_port_clock_valid(hdmi, clock, true, has_hdmi_sink);
|
||||
status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8),
|
||||
true, has_hdmi_sink);
|
||||
|
||||
if (has_hdmi_sink) {
|
||||
/* if we can't do 8bpc we may still be able to do 12bpc */
|
||||
if (status != MODE_OK && !HAS_GMCH(dev_priv))
|
||||
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
|
||||
status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12),
|
||||
true, has_hdmi_sink);
|
||||
|
||||
/* if we can't do 8,12bpc we may still be able to do 10bpc */
|
||||
if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
|
||||
status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
|
||||
status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10),
|
||||
true, has_hdmi_sink);
|
||||
}
|
||||
if (status != MODE_OK)
|
||||
@ -2382,16 +2393,6 @@ intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state,
|
||||
return intel_pch_panel_fitting(crtc_state, conn_state);
|
||||
}
|
||||
|
||||
static int intel_hdmi_port_clock(int clock, int bpc)
|
||||
{
|
||||
/*
|
||||
* Need to adjust the port link by:
|
||||
* 1.5x for 12bpc
|
||||
* 1.25x for 10bpc
|
||||
*/
|
||||
return clock * bpc / 8;
|
||||
}
|
||||
|
||||
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *crtc_state,
|
||||
int clock)
|
||||
@ -3137,11 +3138,45 @@ static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
return GMBUS_PIN_1_BXT + phy;
|
||||
}
|
||||
|
||||
static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port port)
|
||||
{
|
||||
enum phy phy = intel_port_to_phy(i915, port);
|
||||
|
||||
drm_WARN_ON(&i915->drm, port == PORT_A);
|
||||
|
||||
/*
|
||||
* Pin mapping for GEN9 BC depends on which PCH is present. With TGP,
|
||||
* final two outputs use type-c pins, even though they're actually
|
||||
* combo outputs. With CMP, the traditional DDI A-D pins are used for
|
||||
* all outputs.
|
||||
*/
|
||||
if (INTEL_PCH_TYPE(i915) >= PCH_TGP && phy >= PHY_C)
|
||||
return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
|
||||
|
||||
return GMBUS_PIN_1_BXT + phy;
|
||||
}
|
||||
|
||||
static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
{
|
||||
return intel_port_to_phy(dev_priv, port) + 1;
|
||||
}
|
||||
|
||||
static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
{
|
||||
enum phy phy = intel_port_to_phy(dev_priv, port);
|
||||
|
||||
WARN_ON(port == PORT_B || port == PORT_C);
|
||||
|
||||
/*
|
||||
* Pin mapping for ADL-S requires TC pins for all combo phy outputs
|
||||
* except first combo output.
|
||||
*/
|
||||
if (phy == PHY_A)
|
||||
return GMBUS_PIN_1_BXT;
|
||||
|
||||
return GMBUS_PIN_9_TC1_ICP + phy - PHY_B;
|
||||
}
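
For reference, the arithmetic in the two helpers above works out to the following pin assignments (derived from the code, using the same pin macros):

	/* gen9 BC with TGP: the type-c pins serve the last combo PHYs */
	PHY_C -> GMBUS_PIN_9_TC1_ICP, PHY_D -> GMBUS_PIN_9_TC1_ICP + 1

	/* ADL-S: only the first combo PHY keeps the legacy pin */
	PHY_A -> GMBUS_PIN_1_BXT
	PHY_B -> GMBUS_PIN_9_TC1_ICP, PHY_C -> GMBUS_PIN_9_TC1_ICP + 1, ...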
|
||||
|
||||
static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
|
||||
enum port port)
|
||||
{
|
||||
@ -3179,10 +3214,14 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
|
||||
return ddc_pin;
|
||||
}
|
||||
|
||||
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
|
||||
if (HAS_PCH_ADP(dev_priv))
|
||||
ddc_pin = adls_port_to_ddc_pin(dev_priv, port);
|
||||
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
|
||||
ddc_pin = dg1_port_to_ddc_pin(dev_priv, port);
|
||||
else if (IS_ROCKETLAKE(dev_priv))
|
||||
ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
|
||||
else if (IS_GEN9_BC(dev_priv) && HAS_PCH_TGP(dev_priv))
|
||||
ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
|
||||
else if (HAS_PCH_MCC(dev_priv))
|
||||
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
|
||||
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
|
||||
|
@ -80,6 +80,7 @@ static struct platform_device *
|
||||
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct pci_dev *pdev = to_pci_dev(dev->dev);
|
||||
struct platform_device_info pinfo = {};
|
||||
struct resource *rsc;
|
||||
struct platform_device *platdev;
|
||||
@ -99,9 +100,9 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
|
||||
rsc[0].flags = IORESOURCE_IRQ;
|
||||
rsc[0].name = "hdmi-lpe-audio-irq";
|
||||
|
||||
rsc[1].start = pci_resource_start(dev->pdev, 0) +
|
||||
rsc[1].start = pci_resource_start(pdev, 0) +
|
||||
I915_HDMI_LPE_AUDIO_BASE;
|
||||
rsc[1].end = pci_resource_start(dev->pdev, 0) +
|
||||
rsc[1].end = pci_resource_start(pdev, 0) +
|
||||
I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1;
|
||||
rsc[1].flags = IORESOURCE_MEM;
|
||||
rsc[1].name = "hdmi-lpe-audio-mmio";
|
||||
|
@ -247,7 +247,7 @@ static int swsci(struct drm_i915_private *dev_priv,
|
||||
u32 function, u32 parm, u32 *parm_out)
|
||||
{
|
||||
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
u32 main_function, sub_function, scic;
|
||||
u16 swsci_val;
|
||||
u32 dslp;
|
||||
@ -807,7 +807,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
|
||||
if (!name || !*name)
|
||||
return -ENOENT;
|
||||
|
||||
ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
|
||||
ret = request_firmware(&fw, name, dev_priv->drm.dev);
|
||||
if (ret) {
|
||||
drm_err(&dev_priv->drm,
|
||||
"Requesting VBT firmware \"%s\" failed (%d)\n",
|
||||
@ -840,7 +840,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
|
||||
int intel_opregion_setup(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_opregion *opregion = &dev_priv->opregion;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
u32 asls, mboxes;
|
||||
char buf[sizeof(OPREGION_SIGNATURE)];
|
||||
int err = 0;
|
||||
|
@ -203,7 +203,7 @@ struct intel_overlay {
|
||||
static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
|
||||
bool enable)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
u8 val;
|
||||
|
||||
/* WA_OVERLAY_CLKGATE:alm */
|
||||
|
@ -596,7 +596,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unuse
|
||||
if (panel->backlight.combination_mode) {
|
||||
u8 lbpc;
|
||||
|
||||
pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
|
||||
pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc);
|
||||
val *= lbpc;
|
||||
}
|
||||
|
||||
@ -664,7 +664,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
|
||||
|
||||
lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
|
||||
level /= lbpc;
|
||||
pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
|
||||
pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc);
|
||||
}
|
||||
|
||||
if (IS_GEN(dev_priv, 4)) {
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include "i915_drv.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_dp.h"
|
||||
#include "intel_dpll.h"
|
||||
#include "intel_pps.h"
|
||||
|
||||
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
|
||||
|
File diff suppressed because it is too large
@ -18,7 +18,6 @@ struct intel_atomic_state;
|
||||
struct intel_plane_state;
|
||||
struct intel_plane;
|
||||
|
||||
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
|
||||
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
|
||||
void intel_psr_enable(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
@ -28,20 +27,19 @@ void intel_psr_disable(struct intel_dp *intel_dp,
|
||||
void intel_psr_update(struct intel_dp *intel_dp,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state);
|
||||
int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
|
||||
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value);
|
||||
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits,
|
||||
enum fb_op_origin origin);
|
||||
void intel_psr_flush(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits,
|
||||
enum fb_op_origin origin);
|
||||
void intel_psr_init(struct drm_i915_private *dev_priv);
|
||||
void intel_psr_init(struct intel_dp *intel_dp);
|
||||
void intel_psr_compute_config(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state);
|
||||
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
|
||||
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
|
||||
void intel_psr_short_pulse(struct intel_dp *intel_dp);
|
||||
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
|
||||
u32 *out_value);
|
||||
void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
|
||||
bool intel_psr_enabled(struct intel_dp *intel_dp);
|
||||
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc);
|
||||
|
@ -160,7 +160,7 @@ static struct intel_quirk intel_quirks[] = {
|
||||
|
||||
void intel_init_quirks(struct drm_i915_private *i915)
|
||||
{
|
||||
struct pci_dev *d = i915->drm.pdev;
|
||||
struct pci_dev *d = to_pci_dev(i915->drm.dev);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
|
||||
|
@ -3281,7 +3281,7 @@ static bool
|
||||
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
|
||||
struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
|
||||
sdvo->ddc.owner = THIS_MODULE;
|
||||
sdvo->ddc.class = I2C_CLASS_DDC;
|
||||
|
File diff suppressed because it is too large
@ -38,9 +38,6 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
|
||||
int intel_plane_check_stride(const struct intel_plane_state *plane_state);
|
||||
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
|
||||
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
|
||||
struct intel_plane *
|
||||
skl_universal_plane_create(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, enum plane_id plane_id);
|
||||
|
||||
static inline u8 icl_hdr_plane_mask(void)
|
||||
{
|
||||
@ -48,10 +45,6 @@ static inline u8 icl_hdr_plane_mask(void)
|
||||
BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
|
||||
}
|
||||
|
||||
bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
|
||||
enum plane_id plane_id);
|
||||
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
|
||||
|
||||
int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_plane_state *plane_state);
|
||||
int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
|
||||
|
@ -327,6 +327,10 @@ enum vbt_gmbus_ddi {
|
||||
ICL_DDC_BUS_PORT_4,
|
||||
TGL_DDC_BUS_PORT_5,
|
||||
TGL_DDC_BUS_PORT_6,
|
||||
ADLS_DDC_BUS_PORT_TC1 = 0x2,
|
||||
ADLS_DDC_BUS_PORT_TC2,
|
||||
ADLS_DDC_BUS_PORT_TC3,
|
||||
ADLS_DDC_BUS_PORT_TC4
|
||||
};
|
||||
|
||||
#define DP_AUX_A 0x40
|
||||
@ -339,10 +343,21 @@ enum vbt_gmbus_ddi {
|
||||
#define DP_AUX_H 0x80
|
||||
#define DP_AUX_I 0x90
|
||||
|
||||
#define VBT_DP_MAX_LINK_RATE_HBR3 0
|
||||
#define VBT_DP_MAX_LINK_RATE_HBR2 1
|
||||
#define VBT_DP_MAX_LINK_RATE_HBR 2
|
||||
#define VBT_DP_MAX_LINK_RATE_LBR 3
|
||||
/* DP max link rate 216+ */
|
||||
#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR3 0
|
||||
#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR2 1
|
||||
#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR 2
|
||||
#define BDB_216_VBT_DP_MAX_LINK_RATE_LBR 3
|
||||
|
||||
/* DP max link rate 230+ */
|
||||
#define BDB_230_VBT_DP_MAX_LINK_RATE_DEF 0
|
||||
#define BDB_230_VBT_DP_MAX_LINK_RATE_LBR 1
|
||||
#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR 2
|
||||
#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR2 3
|
||||
#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR3 4
|
||||
#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR10 5
|
||||
#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5 6
|
||||
#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20 7
|
||||
|
||||
/*
|
||||
* The child device config, aka the display device data structure, provides a
|
||||
@ -441,8 +456,8 @@ struct child_device_config {
|
||||
u16 dp_gpio_pin_num; /* 195 */
|
||||
u8 dp_iboost_level:4; /* 196 */
|
||||
u8 hdmi_iboost_level:4; /* 196 */
|
||||
u8 dp_max_link_rate:2; /* 216 CNL+ */
|
||||
u8 dp_max_link_rate_reserved:6; /* 216 */
|
||||
u8 dp_max_link_rate:3; /* 216/230 CNL+ */
|
||||
u8 dp_max_link_rate_reserved:5; /* 216/230 */
|
||||
} __packed;
|
||||
|
||||
struct bdb_general_definitions {
|
||||
|
@ -25,7 +25,7 @@ static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
|
||||
/* Disable the VGA plane that we never use */
|
||||
void intel_vga_disable(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
|
||||
u8 sr1;
|
||||
|
||||
@ -76,7 +76,7 @@ void intel_vga_redisable(struct drm_i915_private *i915)
|
||||
|
||||
void intel_vga_reset_io_mem(struct drm_i915_private *i915)
|
||||
{
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
|
||||
/*
|
||||
* After we re-enable the power well, if we touch VGA register 0x3d5
|
||||
@ -136,7 +136,7 @@ intel_vga_set_decode(void *cookie, bool enable_decode)
|
||||
|
||||
int intel_vga_register(struct drm_i915_private *i915)
|
||||
{
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
int ret;
|
||||
|
||||
/*
|
||||
@ -156,7 +156,7 @@ int intel_vga_register(struct drm_i915_private *i915)
|
||||
|
||||
void intel_vga_unregister(struct drm_i915_private *i915)
|
||||
{
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
|
||||
vga_client_register(pdev, NULL, NULL, NULL);
|
||||
}
|
||||
|
556
drivers/gpu/drm/i915/display/skl_scaler.c
Normal file
@ -0,0 +1,556 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright © 2020 Intel Corporation
|
||||
*/
|
||||
#include "intel_display_types.h"
|
||||
#include "skl_scaler.h"
|
||||
#include "skl_universal_plane.h"
|
||||
|
||||
/*
|
||||
* The hardware phase 0.0 refers to the center of the pixel.
|
||||
* We want to start from the top/left edge which is phase
|
||||
* -0.5. That matches how the hardware calculates the scaling
|
||||
* factors (from top-left of the first pixel to bottom-right
|
||||
* of the last pixel, as opposed to the pixel centers).
|
||||
*
|
||||
* For 4:2:0 subsampled chroma planes we obviously have to
|
||||
* adjust that so that the chroma sample position lands in
|
||||
* the right spot.
|
||||
*
|
||||
* Note that for packed YCbCr 4:2:2 formats there is no way to
|
||||
* control chroma siting. The hardware simply replicates the
|
||||
* chroma samples for both of the luma samples, and thus we don't
|
||||
* actually get the expected MPEG2 chroma siting convention :(
|
||||
* The same behaviour is observed on pre-SKL platforms as well.
|
||||
*
|
||||
* Theory behind the formula (note that we ignore sub-pixel
|
||||
* source coordinates):
|
||||
* s = source sample position
|
||||
* d = destination sample position
|
||||
*
|
||||
* Downscaling 4:1:
|
||||
* -0.5
|
||||
* | 0.0
|
||||
* | | 1.5 (initial phase)
|
||||
* | | |
|
||||
* v v v
|
||||
* | s | s | s | s |
|
||||
* | d |
|
||||
*
|
||||
* Upscaling 1:4:
|
||||
* -0.5
|
||||
* | -0.375 (initial phase)
|
||||
* | | 0.0
|
||||
* | | |
|
||||
* v v v
|
||||
* | s |
|
||||
* | d | d | d | d |
|
||||
*/
|
||||
static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
{
	int phase = -0x8000;
	u16 trip = 0;

	if (chroma_cosited)
		phase += (sub - 1) * 0x8000 / sub;

	phase += scale / (2 * sub);

	/*
	 * Hardware initial phase limited to [-0.5:1.5].
	 * Since the max hardware scale factor is 3.0, we
	 * should never actually exceed 1.0 here.
	 */
	WARN_ON(phase < -0x8000 || phase > 0x18000);

	if (phase < 0)
		phase = 0x10000 + phase;
	else
		trip = PS_PHASE_TRIP;

	return ((phase >> 2) & PS_PHASE_MASK) | trip;
}
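
A quick check of the formula against the diagrams in the comment block above, assuming scale is a 16.16 fixed-point factor (0x10000 meaning 1.0) and taking the luma case sub = 1, chroma_cosited = false:

	/* downscaling 4:1: phase = -0x8000 + 0x40000 / 2 = 0x18000, i.e. +1.5   */
	skl_scaler_calc_phase(1, 4 << 16, false);
	/* upscaling 1:4:   phase = -0x8000 + 0x4000 / 2  = -0x6000, i.e. -0.375 */
	skl_scaler_calc_phase(1, 1 << 14, false);

Both match the "initial phase" values sketched in the comment.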
|
||||
|
||||
#define SKL_MIN_SRC_W 8
|
||||
#define SKL_MAX_SRC_W 4096
|
||||
#define SKL_MIN_SRC_H 8
|
||||
#define SKL_MAX_SRC_H 4096
|
||||
#define SKL_MIN_DST_W 8
|
||||
#define SKL_MAX_DST_W 4096
|
||||
#define SKL_MIN_DST_H 8
|
||||
#define SKL_MAX_DST_H 4096
|
||||
#define ICL_MAX_SRC_W 5120
|
||||
#define ICL_MAX_SRC_H 4096
|
||||
#define ICL_MAX_DST_W 5120
|
||||
#define ICL_MAX_DST_H 4096
|
||||
#define SKL_MIN_YUV_420_SRC_W 16
|
||||
#define SKL_MIN_YUV_420_SRC_H 16
|
||||
|
||||
static int
|
||||
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
|
||||
unsigned int scaler_user, int *scaler_id,
|
||||
int src_w, int src_h, int dst_w, int dst_h,
|
||||
const struct drm_format_info *format,
|
||||
u64 modifier, bool need_scaler)
|
||||
{
|
||||
struct intel_crtc_scaler_state *scaler_state =
|
||||
&crtc_state->scaler_state;
|
||||
struct intel_crtc *intel_crtc =
|
||||
to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&crtc_state->hw.adjusted_mode;
|
||||
|
||||
/*
|
||||
* Src coordinates are already rotated by 270 degrees for
|
||||
* the 90/270 degree plane rotation cases (to match the
|
||||
* GTT mapping), hence no need to account for rotation here.
|
||||
*/
|
||||
if (src_w != dst_w || src_h != dst_h)
|
||||
need_scaler = true;
|
||||
|
||||
/*
|
||||
* Scaling/fitting not supported in IF-ID mode in GEN9+
|
||||
* TODO: Interlace fetch mode doesn't support YUV420 planar formats.
|
||||
* Once NV12 is enabled, handle it here while allocating scaler
|
||||
* for NV12.
|
||||
*/
|
||||
if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
|
||||
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Pipe/Plane scaling not supported with IF-ID mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* if the plane is being disabled, the scaler is no longer required, or force detach is requested
|
||||
* - free the scaler bound to this plane/crtc
|
||||
* - in order to do this, update crtc->scaler_usage
|
||||
*
|
||||
* Here scaler state in crtc_state is set free so that
|
||||
* scaler can be assigned to another user. Actual register
|
||||
* update to free the scaler is done in plane/panel-fit programming.
|
||||
* For this purpose crtc/plane_state->scaler_id isn't reset here.
|
||||
*/
|
||||
if (force_detach || !need_scaler) {
|
||||
if (*scaler_id >= 0) {
|
||||
scaler_state->scaler_users &= ~(1 << scaler_user);
|
||||
scaler_state->scalers[*scaler_id].in_use = 0;
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"scaler_user index %u.%u: "
|
||||
"Staged freeing scaler id %d scaler_users = 0x%x\n",
|
||||
intel_crtc->pipe, scaler_user, *scaler_id,
|
||||
scaler_state->scaler_users);
|
||||
*scaler_id = -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
|
||||
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Planar YUV: src dimensions not met\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* range checks */
|
||||
if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
|
||||
dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
|
||||
(INTEL_GEN(dev_priv) >= 11 &&
|
||||
(src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
|
||||
dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
|
||||
(INTEL_GEN(dev_priv) < 11 &&
|
||||
(src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
|
||||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"scaler_user index %u.%u: src %ux%u dst %ux%u "
|
||||
"size is out of scaler range\n",
|
||||
intel_crtc->pipe, scaler_user, src_w, src_h,
|
||||
dst_w, dst_h);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* mark this plane as a scaler user in crtc_state */
|
||||
scaler_state->scaler_users |= (1 << scaler_user);
|
||||
drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
|
||||
"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
|
||||
intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
|
||||
scaler_state->scaler_users);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
|
||||
int width, height;
|
||||
|
||||
if (crtc_state->pch_pfit.enabled) {
|
||||
width = drm_rect_width(&crtc_state->pch_pfit.dst);
|
||||
height = drm_rect_height(&crtc_state->pch_pfit.dst);
|
||||
} else {
|
||||
width = pipe_mode->crtc_hdisplay;
|
||||
height = pipe_mode->crtc_vdisplay;
|
||||
}
|
||||
return skl_update_scaler(crtc_state, !crtc_state->hw.active,
|
||||
SKL_CRTC_INDEX,
|
||||
&crtc_state->scaler_state.scaler_id,
|
||||
crtc_state->pipe_src_w, crtc_state->pipe_src_h,
|
||||
width, height, NULL, 0,
|
||||
crtc_state->pch_pfit.enabled);
|
||||
}
|
||||
|
||||
/**
|
||||
* skl_update_scaler_plane - Stages update to scaler state for a given plane.
|
||||
* @crtc_state: crtc's scaler state
|
||||
* @plane_state: atomic plane state to update
|
||||
*
|
||||
* Return
|
||||
* 0 - scaler_usage updated successfully
|
||||
* error - requested scaling cannot be supported or other error condition
|
||||
*/
|
||||
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
|
||||
struct intel_plane_state *plane_state)
|
||||
{
|
||||
struct intel_plane *intel_plane =
|
||||
to_intel_plane(plane_state->uapi.plane);
|
||||
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
|
||||
struct drm_framebuffer *fb = plane_state->hw.fb;
|
||||
int ret;
|
||||
bool force_detach = !fb || !plane_state->uapi.visible;
|
||||
bool need_scaler = false;
|
||||
|
||||
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
|
||||
if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
|
||||
fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
|
||||
need_scaler = true;
|
||||
|
||||
ret = skl_update_scaler(crtc_state, force_detach,
|
||||
drm_plane_index(&intel_plane->base),
|
||||
&plane_state->scaler_id,
|
||||
drm_rect_width(&plane_state->uapi.src) >> 16,
|
||||
drm_rect_height(&plane_state->uapi.src) >> 16,
|
||||
drm_rect_width(&plane_state->uapi.dst),
|
||||
drm_rect_height(&plane_state->uapi.dst),
|
||||
fb ? fb->format : NULL,
|
||||
fb ? fb->modifier : 0,
|
||||
need_scaler);
|
||||
|
||||
if (ret || plane_state->scaler_id < 0)
|
||||
return ret;
|
||||
|
||||
/* check colorkey */
|
||||
if (plane_state->ckey.flags) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"[PLANE:%d:%s] scaling with color key not allowed",
|
||||
intel_plane->base.base.id,
|
||||
intel_plane->base.name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Check src format */
|
||||
switch (fb->format->format) {
|
||||
case DRM_FORMAT_RGB565:
|
||||
case DRM_FORMAT_XBGR8888:
|
||||
case DRM_FORMAT_XRGB8888:
|
||||
case DRM_FORMAT_ABGR8888:
|
||||
case DRM_FORMAT_ARGB8888:
|
||||
case DRM_FORMAT_XRGB2101010:
|
||||
case DRM_FORMAT_XBGR2101010:
|
||||
case DRM_FORMAT_ARGB2101010:
|
||||
case DRM_FORMAT_ABGR2101010:
|
||||
case DRM_FORMAT_YUYV:
|
||||
case DRM_FORMAT_YVYU:
|
||||
case DRM_FORMAT_UYVY:
|
||||
case DRM_FORMAT_VYUY:
|
||||
case DRM_FORMAT_NV12:
|
||||
case DRM_FORMAT_XYUV8888:
|
||||
case DRM_FORMAT_P010:
|
||||
case DRM_FORMAT_P012:
|
||||
case DRM_FORMAT_P016:
|
||||
case DRM_FORMAT_Y210:
|
||||
case DRM_FORMAT_Y212:
|
||||
case DRM_FORMAT_Y216:
|
||||
case DRM_FORMAT_XVYU2101010:
|
||||
case DRM_FORMAT_XVYU12_16161616:
|
||||
case DRM_FORMAT_XVYU16161616:
|
||||
break;
|
||||
case DRM_FORMAT_XBGR16161616F:
|
||||
case DRM_FORMAT_ABGR16161616F:
|
||||
case DRM_FORMAT_XRGB16161616F:
|
||||
case DRM_FORMAT_ARGB16161616F:
|
||||
if (INTEL_GEN(dev_priv) >= 11)
|
||||
break;
|
||||
fallthrough;
|
||||
default:
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
|
||||
intel_plane->base.base.id, intel_plane->base.name,
|
||||
fb->base.id, fb->format->format);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cnl_coef_tap(int i)
|
||||
{
|
||||
return i % 7;
|
||||
}
|
||||
|
||||
static u16 cnl_nearest_filter_coef(int t)
|
||||
{
|
||||
return t == 3 ? 0x0800 : 0x3000;
|
||||
}
|
||||
|
||||
/*
|
||||
* Theory behind setting nearest-neighbor integer scaling:
|
||||
*
|
||||
* 17 phases of 7 taps require 119 coefficients in 60 dwords per set.
|
||||
* The letter represents the filter tap (D is the center tap) and the number
|
||||
* represents the coefficient set for a phase (0-16).
|
||||
*
|
||||
* +------------+------------------------+------------------------+
|
||||
* |Index value | Data value coeffient 1 | Data value coeffient 2 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 00h | B0 | A0 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 01h | D0 | C0 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 02h | F0 | E0 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 03h | A1 | G0 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 04h | C1 | B1 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | ... | ... | ... |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 38h | B16 | A16 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 39h | D16 | C16 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 3Ah | F16 | C16 |
|
||||
* +------------+------------------------+------------------------+
|
||||
* | 3Bh | Reserved | G16 |
|
||||
* +------------+------------------------+------------------------+
|
||||
*
|
||||
* To enable nearest-neighbor scaling: program scaler coefficents with
|
||||
* the center tap (Dxx) values set to 1 and all other values set to 0 as per
|
||||
* SCALER_COEFFICIENT_FORMAT
|
||||
*
|
||||
*/
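For reference, a standalone sketch of the packing this table describes (illustrative only, not part of the patch; it mirrors what cnl_program_nearest_filter_coefs() below writes through the data register):

#include <stdint.h>

#define NUM_TAPS	7
#define NUM_PHASES	17
#define NUM_COEFS	(NUM_TAPS * NUM_PHASES)		/* 119 coefficients */
#define NUM_DWORDS	((NUM_COEFS + 1) / 2)		/* packed into 60 dwords */

/* Two 16-bit coefficients per dword; tap 3 ("D") is the center tap and gets
 * the driver's "1.0" encoding (0x0800), every other tap gets "0" (0x3000). */
static void pack_nearest_coefs(uint32_t dwords[NUM_DWORDS])
{
	int i;

	for (i = 0; i < NUM_COEFS; i += 2) {
		uint32_t lo = (i % NUM_TAPS) == 3 ? 0x0800 : 0x3000;
		uint32_t hi = ((i + 1) % NUM_TAPS) == 3 ? 0x0800 : 0x3000;

		dwords[i / 2] = (hi << 16) | lo;
	}
}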
|
||||
|
||||
static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, int id, int set)
|
||||
{
|
||||
int i;
|
||||
|
||||
intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
|
||||
PS_COEE_INDEX_AUTO_INC);
|
||||
|
||||
for (i = 0; i < 17 * 7; i += 2) {
|
||||
u32 tmp;
|
||||
int t;
|
||||
|
||||
t = cnl_coef_tap(i);
|
||||
tmp = cnl_nearest_filter_coef(t);
|
||||
|
||||
t = cnl_coef_tap(i + 1);
|
||||
tmp |= cnl_nearest_filter_coef(t) << 16;
|
||||
|
||||
intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
|
||||
tmp);
|
||||
}
|
||||
|
||||
intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
|
||||
}
|
||||
|
||||
static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
|
||||
{
|
||||
if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
|
||||
return (PS_FILTER_PROGRAMMED |
|
||||
PS_Y_VERT_FILTER_SELECT(set) |
|
||||
PS_Y_HORZ_FILTER_SELECT(set) |
|
||||
PS_UV_VERT_FILTER_SELECT(set) |
|
||||
PS_UV_HORZ_FILTER_SELECT(set));
|
||||
}
|
||||
|
||||
return PS_FILTER_MEDIUM;
|
||||
}
|
||||
|
||||
static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
int id, int set, enum drm_scaling_filter filter)
|
||||
{
|
||||
switch (filter) {
|
||||
case DRM_SCALING_FILTER_DEFAULT:
|
||||
break;
|
||||
case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
|
||||
cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(filter);
|
||||
}
|
||||
}
|
||||
|
||||
void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
const struct intel_crtc_scaler_state *scaler_state =
|
||||
&crtc_state->scaler_state;
|
||||
struct drm_rect src = {
|
||||
.x2 = crtc_state->pipe_src_w << 16,
|
||||
.y2 = crtc_state->pipe_src_h << 16,
|
||||
};
|
||||
const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
|
||||
u16 uv_rgb_hphase, uv_rgb_vphase;
|
||||
enum pipe pipe = crtc->pipe;
|
||||
int width = drm_rect_width(dst);
|
||||
int height = drm_rect_height(dst);
|
||||
int x = dst->x1;
|
||||
int y = dst->y1;
|
||||
int hscale, vscale;
|
||||
unsigned long irqflags;
|
||||
int id;
|
||||
u32 ps_ctrl;
|
||||
|
||||
if (!crtc_state->pch_pfit.enabled)
|
||||
return;
|
||||
|
||||
if (drm_WARN_ON(&dev_priv->drm,
|
||||
crtc_state->scaler_state.scaler_id < 0))
|
||||
return;
|
||||
|
||||
hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
|
||||
vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
|
||||
|
||||
uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
|
||||
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
|
||||
|
||||
id = scaler_state->scaler_id;
|
||||
|
||||
ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
|
||||
ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
skl_scaler_setup_filter(dev_priv, pipe, id, 0,
|
||||
crtc_state->hw.scaling_filter);
|
||||
|
||||
intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
|
||||
|
||||
intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
|
||||
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
|
||||
intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
|
||||
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
|
||||
intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
|
||||
x << 16 | y);
|
||||
intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
|
||||
width << 16 | height);
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
|
||||
}
|
||||
|
||||
void
|
||||
skl_program_plane_scaler(struct intel_plane *plane,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_plane_state *plane_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
|
||||
const struct drm_framebuffer *fb = plane_state->hw.fb;
|
||||
enum pipe pipe = plane->pipe;
|
||||
int scaler_id = plane_state->scaler_id;
|
||||
const struct intel_scaler *scaler =
|
||||
&crtc_state->scaler_state.scalers[scaler_id];
|
||||
int crtc_x = plane_state->uapi.dst.x1;
|
||||
int crtc_y = plane_state->uapi.dst.y1;
|
||||
u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
|
||||
u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
|
||||
u16 y_hphase, uv_rgb_hphase;
|
||||
u16 y_vphase, uv_rgb_vphase;
|
||||
int hscale, vscale;
|
||||
u32 ps_ctrl;
|
||||
|
||||
hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
|
||||
&plane_state->uapi.dst,
|
||||
0, INT_MAX);
|
||||
vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
|
||||
&plane_state->uapi.dst,
|
||||
0, INT_MAX);
|
||||
|
||||
/* TODO: handle sub-pixel coordinates */
|
||||
if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
|
||||
!icl_is_hdr_plane(dev_priv, plane->id)) {
|
||||
y_hphase = skl_scaler_calc_phase(1, hscale, false);
|
||||
y_vphase = skl_scaler_calc_phase(1, vscale, false);
|
||||
|
||||
/* MPEG2 chroma siting convention */
|
||||
uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
|
||||
uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
|
||||
} else {
|
||||
/* not used */
|
||||
y_hphase = 0;
|
||||
y_vphase = 0;
|
||||
|
||||
uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
|
||||
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
|
||||
}
|
||||
|
||||
ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
|
||||
ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode;
|
||||
|
||||
skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
|
||||
plane_state->hw.scaling_filter);
|
||||
|
||||
intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
|
||||
intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
|
||||
PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
|
||||
intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
|
||||
PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
|
||||
intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
|
||||
(crtc_x << 16) | crtc_y);
|
||||
intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
|
||||
(crtc_w << 16) | crtc_h);
|
||||
}
|
||||
|
||||
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
|
||||
{
|
||||
struct drm_device *dev = intel_crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
unsigned long irqflags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
|
||||
intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
|
||||
intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function detaches (aka. unbinds) unused scalers in hardware
|
||||
*/
|
||||
void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
const struct intel_crtc_scaler_state *scaler_state =
|
||||
&crtc_state->scaler_state;
|
||||
int i;
|
||||
|
||||
/* loop through and disable scalers that aren't in use */
|
||||
for (i = 0; i < intel_crtc->num_scalers; i++) {
|
||||
if (!scaler_state->scalers[i].in_use)
|
||||
skl_detach_scaler(intel_crtc, i);
|
||||
}
|
||||
}
|
||||
|
||||
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < crtc->num_scalers; i++)
|
||||
skl_detach_scaler(crtc, i);
|
||||
}
|
29
drivers/gpu/drm/i915/display/skl_scaler.h
Normal file
@ -0,0 +1,29 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 */
#ifndef INTEL_SCALER_H
#define INTEL_SCALER_H

#include <linux/types.h>

enum drm_scaling_filter;
struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
struct intel_plane;
enum pipe;

int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);

int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
			    struct intel_plane_state *plane_state);

void skl_pfit_enable(const struct intel_crtc_state *crtc_state);

void skl_program_plane_scaler(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state);
void skl_detach_scalers(const struct intel_crtc_state *crtc_state);
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
#endif
2266
drivers/gpu/drm/i915/display/skl_universal_plane.c
Normal file
File diff suppressed because it is too large
37
drivers/gpu/drm/i915/display/skl_universal_plane.h
Normal file
@ -0,0 +1,37 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 */

#ifndef _SKL_UNIVERSAL_PLANE_H_
#define _SKL_UNIVERSAL_PLANE_H_

#include <linux/types.h>

struct drm_framebuffer;
struct drm_i915_private;
struct intel_crtc;
struct intel_initial_plane_config;
struct intel_plane_state;

enum pipe;
enum plane_id;

struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
			   enum pipe pipe, enum plane_id plane_id);

void skl_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config);

int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);

int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane);
int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
				 int *x, int *y, u32 *offset);

bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
			 enum plane_id plane_id);
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);

#endif
@ -38,6 +38,7 @@
|
||||
#include "intel_fifo_underrun.h"
|
||||
#include "intel_panel.h"
|
||||
#include "intel_sideband.h"
|
||||
#include "skl_scaler.h"
|
||||
|
||||
/* return pixels in terms of txbyteclkhs */
|
||||
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
|
||||
|
@ -35,7 +35,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
|
||||
* to handle all possible callers, and given typical object sizes,
|
||||
* the alignment of the buddy allocation will naturally match.
|
||||
*/
|
||||
vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
|
||||
vaddr = dma_alloc_coherent(obj->base.dev->dev,
|
||||
roundup_pow_of_two(obj->base.size),
|
||||
&dma, GFP_KERNEL);
|
||||
if (!vaddr)
|
||||
@ -83,7 +83,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
|
||||
err_st:
|
||||
kfree(st);
|
||||
err_pci:
|
||||
dma_free_coherent(&obj->base.dev->pdev->dev,
|
||||
dma_free_coherent(obj->base.dev->dev,
|
||||
roundup_pow_of_two(obj->base.size),
|
||||
vaddr, dma);
|
||||
return -ENOMEM;
|
||||
@ -129,7 +129,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
|
||||
sg_free_table(pages);
|
||||
kfree(pages);
|
||||
|
||||
dma_free_coherent(&obj->base.dev->pdev->dev,
|
||||
dma_free_coherent(obj->base.dev->dev,
|
||||
roundup_pow_of_two(obj->base.size),
|
||||
vaddr, dma);
|
||||
}
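The hunks above and below all make the same conversion away from the removed drm_device.pdev field; a minimal sketch of the idiom (illustrative, not part of the patch):

#include <linux/pci.h>
#include <drm/drm_device.h>

/* drm_device now only carries the generic struct device: PCI-specific
 * callers recover the pci_dev with to_pci_dev(), while DMA and runtime-PM
 * helpers can take drm->dev as-is. */
static struct pci_dev *example_i915_pdev(struct drm_device *drm)
{
	return to_pci_dev(drm->dev);
}

static struct device *example_i915_dma_dev(struct drm_device *drm)
{
	return drm->dev;
}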
|
||||
|
@ -172,7 +172,7 @@ rebuild_st:
|
||||
max_segment = PAGE_SIZE;
|
||||
goto rebuild_st;
|
||||
} else {
|
||||
dev_warn(&i915->drm.pdev->dev,
|
||||
dev_warn(i915->drm.dev,
|
||||
"Failed to DMA remap %lu pages\n",
|
||||
page_count);
|
||||
goto err_pages;
|
||||
|
@ -1274,7 +1274,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
|
||||
|
||||
/* Waiting to drain ELSP? */
|
||||
if (execlists_active(&engine->execlists)) {
|
||||
synchronize_hardirq(engine->i915->drm.pdev->irq);
|
||||
synchronize_hardirq(to_pci_dev(engine->i915->drm.dev)->irq);
|
||||
|
||||
intel_engine_flush_submission(engine);
|
||||
|
||||
|
@ -792,7 +792,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
|
||||
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
|
||||
{
|
||||
struct drm_i915_private *i915 = ggtt->vm.i915;
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
phys_addr_t phys_addr;
|
||||
int ret;
|
||||
|
||||
@ -862,7 +862,7 @@ static struct resource pci_resource(struct pci_dev *pdev, int bar)
|
||||
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
|
||||
{
|
||||
struct drm_i915_private *i915 = ggtt->vm.i915;
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
unsigned int size;
|
||||
u16 snb_gmch_ctl;
|
||||
|
||||
@ -1006,7 +1006,7 @@ static u64 iris_pte_encode(dma_addr_t addr,
|
||||
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
|
||||
{
|
||||
struct drm_i915_private *i915 = ggtt->vm.i915;
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
unsigned int size;
|
||||
u16 snb_gmch_ctl;
|
||||
|
||||
@ -1069,7 +1069,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
|
||||
phys_addr_t gmadr_base;
|
||||
int ret;
|
||||
|
||||
ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
|
||||
ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
|
||||
if (!ret) {
|
||||
drm_err(&i915->drm, "failed to set up gmch\n");
|
||||
return -EIO;
|
||||
@ -1114,7 +1114,7 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
|
||||
|
||||
ggtt->vm.gt = gt;
|
||||
ggtt->vm.i915 = i915;
|
||||
ggtt->vm.dma = &i915->drm.pdev->dev;
|
||||
ggtt->vm.dma = i915->drm.dev;
|
||||
|
||||
if (INTEL_GEN(i915) <= 5)
|
||||
ret = i915_gmch_probe(ggtt);
|
||||
|
@ -301,7 +301,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
|
||||
|
||||
ppgtt->vm.gt = gt;
|
||||
ppgtt->vm.i915 = i915;
|
||||
ppgtt->vm.dma = &i915->drm.pdev->dev;
|
||||
ppgtt->vm.dma = i915->drm.dev;
|
||||
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
|
||||
|
||||
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
|
||||
|
@ -485,14 +485,14 @@ static bool rc6_supported(struct intel_rc6 *rc6)
|
||||
static void rpm_get(struct intel_rc6 *rc6)
|
||||
{
|
||||
GEM_BUG_ON(rc6->wakeref);
|
||||
pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev);
|
||||
pm_runtime_get_sync(rc6_to_i915(rc6)->drm.dev);
|
||||
rc6->wakeref = true;
|
||||
}
|
||||
|
||||
static void rpm_put(struct intel_rc6 *rc6)
|
||||
{
|
||||
GEM_BUG_ON(!rc6->wakeref);
|
||||
pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev);
|
||||
pm_runtime_put(rc6_to_i915(rc6)->drm.dev);
|
||||
rc6->wakeref = false;
|
||||
}
|
||||
|
||||
|
@ -26,12 +26,12 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev,
|
||||
mem->remap_addr = dma_map_resource(i915->drm.dev,
|
||||
mem->region.start,
|
||||
mem->fake_mappable.size,
|
||||
PCI_DMA_BIDIRECTIONAL,
|
||||
DMA_ATTR_FORCE_CONTIGUOUS);
|
||||
if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) {
|
||||
if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
|
||||
drm_mm_remove_node(&mem->fake_mappable);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -56,7 +56,7 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
|
||||
|
||||
drm_mm_remove_node(&mem->fake_mappable);
|
||||
|
||||
dma_unmap_resource(&mem->i915->drm.pdev->dev,
|
||||
dma_unmap_resource(mem->i915->drm.dev,
|
||||
mem->remap_addr,
|
||||
mem->fake_mappable.size,
|
||||
PCI_DMA_BIDIRECTIONAL,
|
||||
@ -104,7 +104,7 @@ static const struct intel_memory_region_ops intel_region_lmem_ops = {
|
||||
struct intel_memory_region *
|
||||
intel_setup_fake_lmem(struct drm_i915_private *i915)
|
||||
{
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
struct intel_memory_region *mem;
|
||||
resource_size_t mappable_end;
|
||||
resource_size_t io_start;
|
||||
|
@ -178,7 +178,7 @@ static int i915_do_reset(struct intel_gt *gt,
|
||||
intel_engine_mask_t engine_mask,
|
||||
unsigned int retry)
|
||||
{
|
||||
struct pci_dev *pdev = gt->i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
|
||||
int err;
|
||||
|
||||
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
|
||||
@ -207,7 +207,7 @@ static int g33_do_reset(struct intel_gt *gt,
|
||||
intel_engine_mask_t engine_mask,
|
||||
unsigned int retry)
|
||||
{
|
||||
struct pci_dev *pdev = gt->i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
|
||||
|
||||
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
|
||||
return wait_for_atomic(g4x_reset_complete(pdev), 50);
|
||||
@ -217,7 +217,7 @@ static int g4x_do_reset(struct intel_gt *gt,
|
||||
intel_engine_mask_t engine_mask,
|
||||
unsigned int retry)
|
||||
{
|
||||
struct pci_dev *pdev = gt->i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
|
||||
struct intel_uncore *uncore = gt->uncore;
|
||||
int ret;
|
||||
|
||||
|
@ -71,17 +71,25 @@ const struct i915_rev_steppings kbl_revids[] = {
|
||||
[7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 },
|
||||
};
|
||||
|
||||
const struct i915_rev_steppings tgl_uy_revids[] = {
|
||||
[0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_A0 },
|
||||
[1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_C0 },
|
||||
[2] = { .gt_stepping = TGL_REVID_B1, .disp_stepping = TGL_REVID_C0 },
|
||||
[3] = { .gt_stepping = TGL_REVID_C0, .disp_stepping = TGL_REVID_D0 },
|
||||
const struct i915_rev_steppings tgl_uy_revid_step_tbl[] = {
|
||||
[0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A0 },
|
||||
[1] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_C0 },
|
||||
[2] = { .gt_stepping = STEP_B1, .disp_stepping = STEP_C0 },
|
||||
[3] = { .gt_stepping = STEP_C0, .disp_stepping = STEP_D0 },
|
||||
};
|
||||
|
||||
/* Same GT stepping between tgl_uy_revids and tgl_revids doesn't mean the same HW */
|
||||
const struct i915_rev_steppings tgl_revids[] = {
|
||||
[0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_B0 },
|
||||
[1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_D0 },
|
||||
const struct i915_rev_steppings tgl_revid_step_tbl[] = {
|
||||
[0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_B0 },
|
||||
[1] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_D0 },
|
||||
};
|
||||
|
||||
const struct i915_rev_steppings adls_revid_step_tbl[] = {
|
||||
[0x0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A0 },
|
||||
[0x1] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A2 },
|
||||
[0x4] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_B0 },
|
||||
[0x8] = { .gt_stepping = STEP_C0, .disp_stepping = STEP_B0 },
|
||||
[0xC] = { .gt_stepping = STEP_D0, .disp_stepping = STEP_C0 },
|
||||
};
|
||||
|
||||
static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
|
||||
@ -722,7 +730,8 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
|
||||
|
||||
if (IS_DG1(i915))
|
||||
dg1_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915))
|
||||
else if (IS_ALDERLAKE_S(i915) || IS_ROCKETLAKE(i915) ||
|
||||
IS_TIGERLAKE(i915))
|
||||
tgl_ctx_workarounds_init(engine, wal);
|
||||
else if (IS_GEN(i915, 12))
|
||||
gen12_ctx_workarounds_init(engine, wal);
|
||||
@ -1123,19 +1132,19 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
|
||||
gen12_gt_workarounds_init(i915, wal);
|
||||
|
||||
/* Wa_1409420604:tgl */
|
||||
if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
|
||||
if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0))
|
||||
wa_write_or(wal,
|
||||
SUBSLICE_UNIT_LEVEL_CLKGATE2,
|
||||
CPSSUNIT_CLKGATE_DIS);
|
||||
|
||||
/* Wa_1607087056:tgl also known as BUG:1409180338 */
|
||||
if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
|
||||
if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0))
|
||||
wa_write_or(wal,
|
||||
SLICE_UNIT_LEVEL_CLKGATE,
|
||||
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
|
||||
|
||||
/* Wa_1408615072:tgl[a0] */
|
||||
if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
|
||||
if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0))
|
||||
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
|
||||
VSUNIT_CLKGATE_DIS_TGL);
|
||||
}
|
||||
@ -1613,7 +1622,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
||||
struct drm_i915_private *i915 = engine->i915;
|
||||
|
||||
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
|
||||
IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
|
||||
IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0)) {
|
||||
/*
|
||||
* Wa_1607138336:tgl[a0],dg1[a0]
|
||||
* Wa_1607063988:tgl[a0],dg1[a0]
|
||||
@ -1623,7 +1632,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
||||
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
|
||||
}
|
||||
|
||||
if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
|
||||
if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0)) {
|
||||
/*
|
||||
* Wa_1606679103:tgl
|
||||
* (see also Wa_1606682166:icl)
|
||||
@ -1633,45 +1642,45 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
||||
GEN7_DISABLE_SAMPLER_PREFETCH);
|
||||
}
|
||||
|
||||
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
||||
/* Wa_1606931601:tgl,rkl,dg1 */
|
||||
if (IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
|
||||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
||||
/* Wa_1606931601:tgl,rkl,dg1,adl-s */
|
||||
wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
|
||||
|
||||
/*
|
||||
* Wa_1407928979:tgl A*
|
||||
* Wa_18011464164:tgl[B0+],dg1[B0+]
|
||||
* Wa_22010931296:tgl[B0+],dg1[B0+]
|
||||
* Wa_14010919138:rkl, dg1
|
||||
* Wa_14010919138:rkl,dg1,adl-s
|
||||
*/
|
||||
wa_write_or(wal, GEN7_FF_THREAD_MODE,
|
||||
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
|
||||
|
||||
/*
|
||||
* Wa_1606700617:tgl,dg1
|
||||
* Wa_22010271021:tgl,rkl,dg1
|
||||
* Wa_22010271021:tgl,rkl,dg1, adl-s
|
||||
*/
|
||||
wa_masked_en(wal,
|
||||
GEN9_CS_DEBUG_MODE1,
|
||||
FF_DOP_CLOCK_GATE_DISABLE);
|
||||
|
||||
/* Wa_1406941453:tgl,rkl,dg1 */
|
||||
wa_masked_en(wal,
|
||||
GEN10_SAMPLER_MODE,
|
||||
ENABLE_SMALLPL);
|
||||
}
|
||||
|
||||
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
|
||||
if (IS_ALDERLAKE_S(i915) || IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
|
||||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
||||
/* Wa_1409804808:tgl,rkl,dg1[a0] */
|
||||
/* Wa_1409804808:tgl,rkl,dg1[a0],adl-s */
|
||||
wa_masked_en(wal, GEN7_ROW_CHICKEN2,
|
||||
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
|
||||
|
||||
/*
|
||||
* Wa_1409085225:tgl
|
||||
* Wa_14010229206:tgl,rkl,dg1[a0]
|
||||
* Wa_14010229206:tgl,rkl,dg1[a0],adl-s
|
||||
*/
|
||||
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
|
||||
}
|
||||
|
||||
|
||||
if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
|
||||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
||||
/*
|
||||
* Wa_1607030317:tgl
|
||||
* Wa_1607186500:tgl
|
||||
@ -1688,6 +1697,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
|
||||
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
|
||||
}
|
||||
|
||||
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
|
||||
/* Wa_1406941453:tgl,rkl,dg1 */
|
||||
wa_masked_en(wal,
|
||||
GEN10_SAMPLER_MODE,
|
||||
ENABLE_SMALLPL);
|
||||
}
|
||||
|
||||
if (IS_GEN(i915, 11)) {
|
||||
/* This is not an Wa. Enable for better image quality */
|
||||
wa_masked_en(wal,
|
||||
|
@ -44,9 +44,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
|
||||
* List of required GuC and HuC binaries per-platform.
|
||||
* Must be ordered based on platform + revid, from newer to older.
|
||||
*
|
||||
* Note that RKL uses the same firmware as TGL.
|
||||
* Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
|
||||
* firmware as TGL.
|
||||
*/
|
||||
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
|
||||
fw_def(ALDERLAKE_S, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
|
||||
fw_def(ROCKETLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
|
||||
fw_def(TIGERLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \
|
||||
fw_def(JASPERLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
|
||||
|
@ -374,6 +374,7 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
|
||||
bool primary)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
|
||||
const struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
u16 *gmch_ctl;
|
||||
u8 next;
|
||||
@ -407,9 +408,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
|
||||
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
|
||||
|
||||
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
|
||||
pci_resource_len(gvt->gt->i915->drm.pdev, 0);
|
||||
pci_resource_len(pdev, 0);
|
||||
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
|
||||
pci_resource_len(gvt->gt->i915->drm.pdev, 2);
|
||||
pci_resource_len(pdev, 2);
|
||||
|
||||
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
|
||||
|
||||
|
@ -516,11 +516,27 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
|
||||
port->dpcd = NULL;
|
||||
}
|
||||
|
||||
static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
|
||||
{
|
||||
struct intel_vgpu_vblank_timer *vblank_timer;
|
||||
struct intel_vgpu *vgpu;
|
||||
|
||||
vblank_timer = container_of(data, struct intel_vgpu_vblank_timer, timer);
|
||||
vgpu = container_of(vblank_timer, struct intel_vgpu, vblank_timer);
|
||||
|
||||
/* Set vblank emulation request per-vGPU bit */
|
||||
intel_gvt_request_service(vgpu->gvt,
|
||||
INTEL_GVT_REQUEST_EMULATE_VBLANK + vgpu->id);
|
||||
hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
|
||||
return HRTIMER_RESTART;
|
||||
}
|
||||
|
||||
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
|
||||
int type, unsigned int resolution)
|
||||
{
|
||||
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
|
||||
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
|
||||
struct intel_vgpu_vblank_timer *vblank_timer = &vgpu->vblank_timer;
|
||||
|
||||
if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM))
|
||||
return -EINVAL;
|
||||
@ -544,6 +560,14 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
|
||||
port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
|
||||
port->type = type;
|
||||
port->id = resolution;
|
||||
port->vrefresh_k = GVT_DEFAULT_REFRESH_RATE * MSEC_PER_SEC;
|
||||
vgpu->display.port_num = port_num;
|
||||
|
||||
/* Init hrtimer based on default refresh rate */
|
||||
hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
|
||||
vblank_timer->timer.function = vblank_timer_fn;
|
||||
vblank_timer->vrefresh_k = port->vrefresh_k;
|
||||
vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k);
|
||||
|
||||
emulate_monitor_status_change(vgpu);
|
||||
|
||||
@ -551,41 +575,44 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_check_vblank_emulation - check if vblank emulation timer should
|
||||
* be turned on/off when a virtual pipe is enabled/disabled.
|
||||
* @gvt: a GVT device
|
||||
* vgpu_update_vblank_emulation - Update per-vGPU vblank_timer
|
||||
* @vgpu: vGPU operated
|
||||
* @turnon: Turn ON/OFF vblank_timer
|
||||
*
|
||||
* This function is used to turn on/off vblank timer according to currently
|
||||
* enabled/disabled virtual pipes.
|
||||
* This function is used to turn on/off or update the per-vGPU vblank_timer
|
||||
* when PIPECONF is enabled or disabled. vblank_timer period is also updated
|
||||
* if guest changed the refresh rate.
|
||||
*
|
||||
*/
|
||||
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
|
||||
void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon)
|
||||
{
|
||||
struct intel_gvt_irq *irq = &gvt->irq;
|
||||
struct intel_vgpu *vgpu;
|
||||
int pipe, id;
|
||||
int found = false;
|
||||
struct intel_vgpu_vblank_timer *vblank_timer = &vgpu->vblank_timer;
|
||||
struct intel_vgpu_port *port =
|
||||
intel_vgpu_port(vgpu, vgpu->display.port_num);
|
||||
|
||||
mutex_lock(&gvt->lock);
|
||||
for_each_active_vgpu(gvt, vgpu, id) {
|
||||
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
|
||||
if (pipe_is_enabled(vgpu, pipe)) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
if (turnon) {
|
||||
/*
|
||||
* Skip the re-enable if already active and vrefresh unchanged.
|
||||
* Otherwise, stop timer if already active and restart with new
|
||||
* period.
|
||||
*/
|
||||
if (vblank_timer->vrefresh_k != port->vrefresh_k ||
|
||||
!hrtimer_active(&vblank_timer->timer)) {
|
||||
/* Stop timer before start with new period if active */
|
||||
if (hrtimer_active(&vblank_timer->timer))
|
||||
hrtimer_cancel(&vblank_timer->timer);
|
||||
|
||||
/* Make sure new refresh rate updated to timer period */
|
||||
vblank_timer->vrefresh_k = port->vrefresh_k;
|
||||
vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k);
|
||||
hrtimer_start(&vblank_timer->timer,
|
||||
ktime_add_ns(ktime_get(), vblank_timer->period),
|
||||
HRTIMER_MODE_ABS);
|
||||
}
|
||||
if (found)
|
||||
break;
|
||||
} else {
|
||||
/* Caller request to stop vblank */
|
||||
hrtimer_cancel(&vblank_timer->timer);
|
||||
}
|
||||
|
||||
/* all the pipes are disabled */
|
||||
if (!found)
|
||||
hrtimer_cancel(&irq->vblank_timer.timer);
|
||||
else
|
||||
hrtimer_start(&irq->vblank_timer.timer,
|
||||
ktime_add_ns(ktime_get(), irq->vblank_timer.period),
|
||||
HRTIMER_MODE_ABS);
|
||||
mutex_unlock(&gvt->lock);
|
||||
}
|
||||
|
||||
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
|
||||
@ -617,7 +644,7 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
|
||||
}
|
||||
}
|
||||
|
||||
static void emulate_vblank(struct intel_vgpu *vgpu)
|
||||
void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu)
|
||||
{
|
||||
int pipe;
|
||||
|
||||
@ -627,24 +654,6 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
|
||||
mutex_unlock(&vgpu->vgpu_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device
|
||||
* @gvt: a GVT device
|
||||
*
|
||||
* This function is used to trigger vblank interrupts for vGPUs on GVT device
|
||||
*
|
||||
*/
|
||||
void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_vgpu *vgpu;
|
||||
int id;
|
||||
|
||||
mutex_lock(&gvt->lock);
|
||||
for_each_active_vgpu(gvt, vgpu, id)
|
||||
emulate_vblank(vgpu);
|
||||
mutex_unlock(&gvt->lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
|
||||
* @vgpu: a vGPU
|
||||
@ -753,6 +762,8 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
|
||||
clean_virtual_dp_monitor(vgpu, PORT_D);
|
||||
else
|
||||
clean_virtual_dp_monitor(vgpu, PORT_B);
|
||||
|
||||
vgpu_update_vblank_emulation(vgpu, false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -36,6 +36,7 @@
|
||||
#define _GVT_DISPLAY_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/hrtimer.h>
|
||||
|
||||
struct intel_gvt;
|
||||
struct intel_vgpu;
|
||||
@ -157,6 +158,7 @@ enum intel_vgpu_edid {
|
||||
GVT_EDID_NUM,
|
||||
};
|
||||
|
||||
#define GVT_DEFAULT_REFRESH_RATE 60
|
||||
struct intel_vgpu_port {
|
||||
/* per display EDID information */
|
||||
struct intel_vgpu_edid_data *edid;
|
||||
@ -164,6 +166,14 @@ struct intel_vgpu_port {
|
||||
struct intel_vgpu_dpcd_data *dpcd;
|
||||
int type;
|
||||
enum intel_vgpu_edid id;
|
||||
/* x1000 to get accurate 59.94, 23.976, 29.97, etc. in timing std. */
|
||||
u32 vrefresh_k;
|
||||
};
|
||||
|
||||
struct intel_vgpu_vblank_timer {
|
||||
struct hrtimer timer;
|
||||
u32 vrefresh_k;
|
||||
u64 period;
|
||||
};
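For reference, the period stored here is computed elsewhere in this patch as DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vrefresh_k); a tiny standalone sketch of that arithmetic (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* vrefresh_k is the refresh rate in millihertz (59.94 Hz -> 59940), so the
 * period in nanoseconds is 1e9 * 1e3 / vrefresh_k, rounded to nearest. */
static uint64_t vblank_period_ns(uint32_t vrefresh_k)
{
	return (1000000000ULL * 1000ULL + vrefresh_k / 2) / vrefresh_k;
}

int main(void)
{
	/* 60 Hz -> 16666667 ns (~16.67 ms); 59.94 Hz -> 16683350 ns */
	printf("%llu %llu\n",
	       (unsigned long long)vblank_period_ns(60000),
	       (unsigned long long)vblank_period_ns(59940));
	return 0;
}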
|
||||
|
||||
static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
|
||||
@ -202,8 +212,8 @@ static inline unsigned int vgpu_edid_yres(enum intel_vgpu_edid id)
|
||||
}
|
||||
}
|
||||
|
||||
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
|
||||
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
|
||||
void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu);
|
||||
void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon);
|
||||
|
||||
int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
|
||||
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
|
||||
|
@ -76,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
|
||||
static int expose_firmware_sysfs(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
|
||||
struct gvt_firmware_header *h;
|
||||
void *firmware;
|
||||
void *p;
|
||||
@ -127,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
|
||||
|
||||
static void clean_firmware_sysfs(struct intel_gvt *gvt)
|
||||
{
|
||||
struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
|
||||
|
||||
device_remove_bin_file(&pdev->dev, &firmware_attr);
|
||||
vfree(firmware_attr.private);
|
||||
@ -151,7 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt,
|
||||
const struct firmware *fw)
|
||||
{
|
||||
struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
|
||||
struct gvt_firmware_header *h;
|
||||
unsigned long id, crc32_start;
|
||||
const void *mem;
|
||||
@ -205,7 +205,7 @@ invalid_firmware:
|
||||
int intel_gvt_load_firmware(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
|
||||
struct intel_gvt_firmware *firmware = &gvt->firmware;
|
||||
struct gvt_firmware_header *h;
|
||||
const struct firmware *fw;
|
||||
@ -240,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
|
||||
|
||||
gvt_dbg_core("request hw state firmware %s...\n", path);
|
||||
|
||||
ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev);
|
||||
ret = request_firmware(&fw, path, gvt->gt->i915->drm.dev);
|
||||
kfree(path);
|
||||
|
||||
if (ret)
|
||||
|
@ -746,7 +746,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
|
||||
|
||||
static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
|
||||
{
|
||||
struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
|
||||
struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev;
|
||||
|
||||
trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
|
||||
|
||||
@ -831,7 +831,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
|
||||
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
|
||||
struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
|
||||
{
|
||||
struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
|
||||
struct device *kdev = vgpu->gvt->gt->i915->drm.dev;
|
||||
struct intel_vgpu_ppgtt_spt *spt = NULL;
|
||||
dma_addr_t daddr;
|
||||
int ret;
|
||||
@ -1159,8 +1159,8 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
|
||||
* @vgpu: target vgpu
|
||||
* @entry: target pfn's gtt entry
|
||||
*
|
||||
* Return 1 if 2MB huge gtt shadowing is possilbe, 0 if miscondition,
|
||||
* negtive if found err.
|
||||
* Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition,
|
||||
* negative if found err.
|
||||
*/
|
||||
static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
|
||||
struct intel_gvt_gtt_entry *entry)
|
||||
@ -2402,7 +2402,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
|
||||
vgpu->gvt->device_info.gtt_entry_size_shift;
|
||||
void *scratch_pt;
|
||||
int i;
|
||||
struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
|
||||
struct device *dev = vgpu->gvt->gt->i915->drm.dev;
|
||||
dma_addr_t daddr;
|
||||
|
||||
if (drm_WARN_ON(&i915->drm,
|
||||
@ -2460,7 +2460,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
|
||||
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
|
||||
{
|
||||
int i;
|
||||
struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
|
||||
struct device *dev = vgpu->gvt->gt->i915->drm.dev;
|
||||
dma_addr_t daddr;
|
||||
|
||||
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
|
||||
@ -2732,7 +2732,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
|
||||
{
|
||||
int ret;
|
||||
void *page;
|
||||
struct device *dev = &gvt->gt->i915->drm.pdev->dev;
|
||||
struct device *dev = gvt->gt->i915->drm.dev;
|
||||
dma_addr_t daddr;
|
||||
|
||||
gvt_dbg_core("init gtt\n");
|
||||
@ -2781,7 +2781,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
|
||||
*/
|
||||
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
|
||||
{
|
||||
struct device *dev = &gvt->gt->i915->drm.pdev->dev;
|
||||
struct device *dev = gvt->gt->i915->drm.dev;
|
||||
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
|
||||
I915_GTT_PAGE_SHIFT);
|
||||
|
||||
|
@ -50,7 +50,7 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
|
||||
const char *name)
|
||||
{
|
||||
const char *driver_name =
|
||||
dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
|
||||
dev_driver_string(gvt->gt->i915->drm.dev);
|
||||
int i;
|
||||
|
||||
name += strlen(driver_name) + 1;
|
||||
@ -189,7 +189,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
|
||||
static void init_device_info(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
|
||||
|
||||
info->max_support_vgpus = 8;
|
||||
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
|
||||
@ -203,6 +203,22 @@ static void init_device_info(struct intel_gvt *gvt)
|
||||
info->msi_cap_offset = pdev->msi_cap;
|
||||
}
|
||||
|
||||
static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_vgpu *vgpu;
|
||||
int id;
|
||||
|
||||
mutex_lock(&gvt->lock);
|
||||
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
|
||||
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
|
||||
(void *)&gvt->service_request)) {
|
||||
if (vgpu->active)
|
||||
intel_vgpu_emulate_vblank(vgpu);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&gvt->lock);
|
||||
}
|
||||
|
||||
static int gvt_service_thread(void *data)
|
||||
{
|
||||
struct intel_gvt *gvt = (struct intel_gvt *)data;
|
||||
@ -220,9 +236,7 @@ static int gvt_service_thread(void *data)
|
||||
if (WARN_ONCE(ret, "service thread is waken up by signal.\n"))
|
||||
continue;
|
||||
|
||||
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
|
||||
(void *)&gvt->service_request))
|
||||
intel_gvt_emulate_vblank(gvt);
|
||||
intel_gvt_test_and_emulate_vblank(gvt);
|
||||
|
||||
if (test_bit(INTEL_GVT_REQUEST_SCHED,
|
||||
(void *)&gvt->service_request) ||
|
||||
@ -278,7 +292,6 @@ void intel_gvt_clean_device(struct drm_i915_private *i915)
|
||||
intel_gvt_clean_sched_policy(gvt);
|
||||
intel_gvt_clean_workload_scheduler(gvt);
|
||||
intel_gvt_clean_gtt(gvt);
|
||||
intel_gvt_clean_irq(gvt);
|
||||
intel_gvt_free_firmware(gvt);
|
||||
intel_gvt_clean_mmio_info(gvt);
|
||||
idr_destroy(&gvt->vgpu_idr);
|
||||
@ -337,7 +350,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
|
||||
|
||||
ret = intel_gvt_init_gtt(gvt);
|
||||
if (ret)
|
||||
goto out_clean_irq;
|
||||
goto out_free_firmware;
|
||||
|
||||
ret = intel_gvt_init_workload_scheduler(gvt);
|
||||
if (ret)
|
||||
@ -376,7 +389,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
|
||||
intel_gvt_debugfs_init(gvt);
|
||||
|
||||
gvt_dbg_core("gvt device initialization is done\n");
|
||||
intel_gvt_host.dev = &i915->drm.pdev->dev;
|
||||
intel_gvt_host.dev = i915->drm.dev;
|
||||
intel_gvt_host.initialized = true;
|
||||
return 0;
|
||||
|
||||
@ -392,8 +405,6 @@ out_clean_workload_scheduler:
|
||||
intel_gvt_clean_workload_scheduler(gvt);
|
||||
out_clean_gtt:
|
||||
intel_gvt_clean_gtt(gvt);
|
||||
out_clean_irq:
|
||||
intel_gvt_clean_irq(gvt);
|
||||
out_free_firmware:
|
||||
intel_gvt_free_firmware(gvt);
|
||||
out_clean_mmio_info:
|
||||
|
@ -133,6 +133,7 @@ struct intel_vgpu_display {
|
||||
struct intel_vgpu_i2c_edid i2c_edid;
|
||||
struct intel_vgpu_port ports[I915_MAX_PORTS];
|
||||
struct intel_vgpu_sbi sbi;
|
||||
enum port port_num;
|
||||
};
|
||||
|
||||
struct vgpu_sched_ctl {
|
||||
@ -214,6 +215,7 @@ struct intel_vgpu {
|
||||
struct list_head dmabuf_obj_list_head;
|
||||
struct mutex dmabuf_lock;
|
||||
struct idr object_idr;
|
||||
struct intel_vgpu_vblank_timer vblank_timer;
|
||||
|
||||
u32 scan_nonprivbb;
|
||||
};
|
||||
@ -346,13 +348,16 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
|
||||
}
|
||||
|
||||
enum {
|
||||
INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
|
||||
|
||||
/* Scheduling trigger by timer */
|
||||
INTEL_GVT_REQUEST_SCHED = 1,
|
||||
INTEL_GVT_REQUEST_SCHED = 0,
|
||||
|
||||
/* Scheduling trigger by event */
|
||||
INTEL_GVT_REQUEST_EVENT_SCHED = 2,
|
||||
INTEL_GVT_REQUEST_EVENT_SCHED = 1,
|
||||
|
||||
/* per-vGPU vblank emulation request */
|
||||
INTEL_GVT_REQUEST_EMULATE_VBLANK = 2,
|
||||
INTEL_GVT_REQUEST_EMULATE_VBLANK_MAX = INTEL_GVT_REQUEST_EMULATE_VBLANK
|
||||
+ GVT_MAX_VGPU,
|
||||
};
|
||||
|
||||
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
|
||||
|
@ -39,6 +39,7 @@
|
||||
#include "i915_drv.h"
|
||||
#include "gvt.h"
|
||||
#include "i915_pvinfo.h"
|
||||
#include "display/intel_display_types.h"
|
||||
|
||||
/* XXX FIXME i915 has changed PP_XXX definition */
|
||||
#define PCH_PP_STATUS _MMIO(0xc7200)
|
||||
@ -443,6 +444,254 @@ static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Only PIPE_A is enabled in the current vGPU display and PIPE_A is tied to
 * TRANSCODER_A in HW. The DDI/PORT could be any PORT_x, depending on
 * setup_virtual_dp_monitor().
 * emulate_monitor_status_change() sets up the PLL for PORT_x as the initially
 * enabled DPLL. Later the guest driver may set up a different DPLLx when
 * setting the mode. So the correct sequence to find the DP stream clock is:
 * Check TRANS_DDI_FUNC_CTL on TRANSCODER_A to get PORT_x.
 * Check the correct PLLx for PORT_x to get the PLL frequency and DP bitrate.
 * The refresh rate can then be calculated from the following equations:
 * Pixel clock = h_total * v_total * refresh_rate
 * stream clock = Pixel clock
 * ls_clk = DP bitrate
 * Link M/N = strm_clk / ls_clk
 */
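A standalone worked example of the equations above, with made-up link M/N and timing values (a sketch only, not part of the patch; it mirrors the arithmetic done by vgpu_update_refresh_rate() further down, where dp_br comes from the per-platform helpers below):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values: an HBR link for which the helpers below return
	 * dp_br = 270000 * 2 (kHz), invented link M/N register values, and
	 * 1920x1080 CEA timings (htotal 2200, vtotal 1125). */
	uint64_t dp_br  = 270000ULL * 2;
	uint64_t link_m = 144179, link_n = 524288;
	uint64_t htotal = 2200, vtotal = 1125;

	/* Pixel clock = ls_clk * M / N (still in kHz) */
	uint64_t pixel_clk = dp_br * link_m / link_n;

	/* Refresh rate in millihertz, the same scale as vrefresh_k */
	uint64_t vrefresh_k = pixel_clk * 1000000ULL / (htotal * vtotal);

	/* Prints roughly 148499 kHz and 59999 mHz, i.e. ~60 Hz */
	printf("pixel clock %llu kHz, refresh %llu mHz\n",
	       (unsigned long long)pixel_clk,
	       (unsigned long long)vrefresh_k);
	return 0;
}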
|
||||
|
||||
static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
|
||||
{
|
||||
u32 dp_br = 0;
|
||||
u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port));
|
||||
|
||||
switch (ddi_pll_sel) {
|
||||
case PORT_CLK_SEL_LCPLL_2700:
|
||||
dp_br = 270000 * 2;
|
||||
break;
|
||||
case PORT_CLK_SEL_LCPLL_1350:
|
||||
dp_br = 135000 * 2;
|
||||
break;
|
||||
case PORT_CLK_SEL_LCPLL_810:
|
||||
dp_br = 81000 * 2;
|
||||
break;
|
||||
case PORT_CLK_SEL_SPLL:
|
||||
{
|
||||
switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) {
|
||||
case SPLL_FREQ_810MHz:
|
||||
dp_br = 81000 * 2;
|
||||
break;
|
||||
case SPLL_FREQ_1350MHz:
|
||||
dp_br = 135000 * 2;
|
||||
break;
|
||||
case SPLL_FREQ_2700MHz:
|
||||
dp_br = 270000 * 2;
|
||||
break;
|
||||
default:
|
||||
gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n",
|
||||
vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL));
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case PORT_CLK_SEL_WRPLL1:
|
||||
case PORT_CLK_SEL_WRPLL2:
|
||||
{
|
||||
u32 wrpll_ctl;
|
||||
int refclk, n, p, r;
|
||||
|
||||
if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1)
|
||||
wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1));
|
||||
else
|
||||
wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2));
|
||||
|
||||
switch (wrpll_ctl & WRPLL_REF_MASK) {
|
||||
case WRPLL_REF_PCH_SSC:
|
||||
refclk = vgpu->gvt->gt->i915->dpll.ref_clks.ssc;
|
||||
break;
|
||||
case WRPLL_REF_LCPLL:
|
||||
refclk = 2700000;
|
||||
break;
|
||||
default:
|
||||
gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n",
|
||||
vgpu->id, port_name(port), wrpll_ctl);
|
||||
goto out;
|
||||
}
|
||||
|
||||
r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK;
|
||||
p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
|
||||
n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
|
||||
|
||||
dp_br = (refclk * n / 10) / (p * r) * 2;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n",
|
||||
vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)));
|
||||
break;
|
||||
}
|
||||
|
||||
out:
|
||||
return dp_br;
|
||||
}
|
||||
|
||||
static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
|
||||
{
|
||||
u32 dp_br = 0;
|
||||
int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc;
|
||||
enum dpio_phy phy = DPIO_PHY0;
|
||||
enum dpio_channel ch = DPIO_CH0;
|
||||
struct dpll clock = {0};
|
||||
u32 temp;
|
||||
|
||||
/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
|
||||
switch (port) {
|
||||
case PORT_A:
|
||||
phy = DPIO_PHY1;
|
||||
ch = DPIO_CH0;
|
||||
break;
|
||||
case PORT_B:
|
||||
phy = DPIO_PHY0;
|
||||
ch = DPIO_CH0;
|
||||
break;
|
||||
case PORT_C:
|
||||
phy = DPIO_PHY0;
|
||||
ch = DPIO_CH1;
|
||||
break;
|
||||
default:
|
||||
gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port));
|
||||
goto out;
|
||||
}
|
||||
|
||||
temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port));
|
||||
if (!(temp & PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) {
|
||||
gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n",
|
||||
vgpu->id, port_name(port), temp);
|
||||
goto out;
|
||||
}
|
||||
|
||||
clock.m1 = 2;
|
||||
clock.m2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0)) & PORT_PLL_M2_MASK) << 22;
|
||||
if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
|
||||
clock.m2 |= vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)) & PORT_PLL_M2_FRAC_MASK;
|
||||
clock.n = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)) & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
|
||||
clock.p1 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
|
||||
clock.p2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
|
||||
clock.m = clock.m1 * clock.m2;
|
||||
clock.p = clock.p1 * clock.p2;
|
||||
|
||||
if (clock.n == 0 || clock.p == 0) {
|
||||
gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
|
||||
goto out;
|
||||
}
|
||||
|
||||
clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
|
||||
clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);
|
||||
|
||||
dp_br = clock.dot / 5;
|
||||
|
||||
out:
|
||||
return dp_br;
|
||||
}
|
||||
|
||||
static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
|
||||
{
|
||||
u32 dp_br = 0;
|
||||
enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0;
|
||||
|
||||
/* Find the enabled DPLL for the DDI/PORT */
|
||||
if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) &&
|
||||
(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) {
|
||||
dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) &
|
||||
DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
|
||||
DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
|
||||
} else {
|
||||
gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n",
|
||||
vgpu->id, port_name(port));
|
||||
return dp_br;
|
||||
}
|
||||
|
||||
/* Find the PLL output frequency from the correct DPLL, and get the bit rate */
|
||||
switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) &
|
||||
DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >>
|
||||
DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) {
|
||||
case DPLL_CTRL1_LINK_RATE_810:
|
||||
dp_br = 81000 * 2;
|
||||
break;
|
||||
case DPLL_CTRL1_LINK_RATE_1080:
|
||||
dp_br = 108000 * 2;
|
||||
break;
|
||||
case DPLL_CTRL1_LINK_RATE_1350:
|
||||
dp_br = 135000 * 2;
|
||||
break;
|
||||
case DPLL_CTRL1_LINK_RATE_1620:
|
||||
dp_br = 162000 * 2;
|
||||
break;
|
||||
case DPLL_CTRL1_LINK_RATE_2160:
|
||||
dp_br = 216000 * 2;
|
||||
break;
|
||||
case DPLL_CTRL1_LINK_RATE_2700:
|
||||
dp_br = 270000 * 2;
|
||||
break;
|
||||
default:
|
||||
dp_br = 0;
|
||||
gvt_dbg_dpy("vgpu-%d PORT_%c fail to get DPLL-%d freq\n",
|
||||
vgpu->id, port_name(port), dpll_id);
|
||||
}
|
||||
|
||||
return dp_br;
|
||||
}
|
||||
|
||||
static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
|
||||
enum port port;
|
||||
u32 dp_br, link_m, link_n, htotal, vtotal;
|
||||
|
||||
/* Find DDI/PORT assigned to TRANSCODER_A, expect B or D */
|
||||
port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &
|
||||
TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
|
||||
if (port != PORT_B && port != PORT_D) {
|
||||
gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
|
||||
return;
|
||||
}
|
||||
|
||||
/* Calculate DP bitrate from PLL */
|
||||
if (IS_BROADWELL(dev_priv))
|
||||
dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port);
|
||||
else if (IS_BROXTON(dev_priv))
|
||||
dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port);
|
||||
else
|
||||
dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);
|
||||
|
||||
/* Get DP link symbol clock M/N */
|
||||
link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A));
|
||||
link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));
|
||||
|
||||
/* Get H/V total from transcoder timing */
|
||||
htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT) + 1;
|
||||
vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT) + 1;
|
||||
|
||||
if (dp_br && link_n && htotal && vtotal) {
|
||||
u64 pixel_clk = 0;
|
||||
u32 new_rate = 0;
|
||||
u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k);
|
||||
|
||||
/* Calculate pixel clock by (ls_clk * M / N) */
|
||||
pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n);
|
||||
pixel_clk *= MSEC_PER_SEC;
|
||||
|
||||
/* Calculate refresh rate by (pixel_clk / (h_total * v_total)) */
|
||||
new_rate = DIV64_U64_ROUND_CLOSEST(pixel_clk, div64_u64(mul_u32_u32(htotal, vtotal), MSEC_PER_SEC));
|
||||
|
||||
if (*old_rate != new_rate)
|
||||
*old_rate = new_rate;
|
||||
|
||||
gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n",
|
||||
vgpu->id, pipe_name(PIPE_A), new_rate);
|
||||
}
|
||||
}
|
||||
|
||||
static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
@ -451,14 +700,14 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
write_vreg(vgpu, offset, p_data, bytes);
|
||||
data = vgpu_vreg(vgpu, offset);
|
||||
|
||||
if (data & PIPECONF_ENABLE)
|
||||
if (data & PIPECONF_ENABLE) {
|
||||
vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
|
||||
else
|
||||
vgpu_update_refresh_rate(vgpu);
|
||||
vgpu_update_vblank_emulation(vgpu, true);
|
||||
} else {
|
||||
vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
|
||||
/* vgpu_lock already hold by emulate mmio r/w */
|
||||
mutex_unlock(&vgpu->vgpu_lock);
|
||||
intel_gvt_check_vblank_emulation(vgpu->gvt);
|
||||
mutex_lock(&vgpu->vgpu_lock);
|
||||
vgpu_update_vblank_emulation(vgpu, false);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -647,38 +647,6 @@ static void init_events(
|
||||
}
|
||||
}
|
||||
|
||||
static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
|
||||
{
|
||||
struct intel_gvt_vblank_timer *vblank_timer;
|
||||
struct intel_gvt_irq *irq;
|
||||
struct intel_gvt *gvt;
|
||||
|
||||
vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer);
|
||||
irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer);
|
||||
gvt = container_of(irq, struct intel_gvt, irq);
|
||||
|
||||
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
|
||||
hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
|
||||
return HRTIMER_RESTART;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem
|
||||
* @gvt: a GVT device
|
||||
*
|
||||
* This function is called at driver unloading stage, to clean up GVT-g IRQ
|
||||
* emulation subsystem.
|
||||
*
|
||||
*/
|
||||
void intel_gvt_clean_irq(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_irq *irq = &gvt->irq;
|
||||
|
||||
hrtimer_cancel(&irq->vblank_timer.timer);
|
||||
}
|
||||
|
||||
#define VBLANK_TIMER_PERIOD 16000000
|
||||
|
||||
/**
|
||||
* intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
|
||||
* @gvt: a GVT device
|
||||
@ -692,7 +660,6 @@ void intel_gvt_clean_irq(struct intel_gvt *gvt)
|
||||
int intel_gvt_init_irq(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_irq *irq = &gvt->irq;
|
||||
struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer;
|
||||
|
||||
gvt_dbg_core("init irq framework\n");
|
||||
|
||||
@ -707,9 +674,5 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
|
||||
|
||||
init_irq_map(irq);
|
||||
|
||||
hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
|
||||
vblank_timer->timer.function = vblank_timer_fn;
|
||||
vblank_timer->period = VBLANK_TIMER_PERIOD;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -201,11 +201,6 @@ struct intel_gvt_irq_map {
u32 down_irq_bitmask;
};

struct intel_gvt_vblank_timer {
struct hrtimer timer;
u64 period;
};

/* structure containing device specific IRQ state */
struct intel_gvt_irq {
struct intel_gvt_irq_ops *ops;
@@ -214,11 +209,9 @@ struct intel_gvt_irq {
struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
struct intel_gvt_irq_map *irq_map;
struct intel_gvt_vblank_timer vblank_timer;
};

int intel_gvt_init_irq(struct intel_gvt *gvt);
void intel_gvt_clean_irq(struct intel_gvt *gvt);

void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event);
@@ -221,7 +221,7 @@ err:
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t *dma_addr, unsigned long size)
{
struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct device *dev = vgpu->gvt->gt->i915->drm.dev;
struct page *page = NULL;
int ret;

@@ -244,7 +244,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t dma_addr, unsigned long size)
{
struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct device *dev = vgpu->gvt->gt->i915->drm.dev;

dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
gvt_unpin_guest_page(vgpu, gfn, size);

@@ -300,8 +300,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
mutex_unlock(&vgpu->vgpu_lock);

mutex_lock(&gvt->lock);
if (idr_is_empty(&gvt->vgpu_idr))
intel_gvt_clean_irq(gvt);
intel_gvt_update_vgpu_types(gvt);
mutex_unlock(&gvt->lock);
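Aside: this and many later hunks replace direct uses of the removed drm_device.pdev pointer with the generic drm_device.dev and, where a PCI view is still needed, to_pci_dev(). In the kernel, to_pci_dev() is just a container_of() step from the embedded struct device back to its enclosing struct pci_dev; a userspace analogue with stand-in structures and made-up field values:

#include <stddef.h>
#include <stdio.h>

/* Userspace restatement of the kernel's container_of(). */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins: a generic device embedded inside a PCI device,
 * mirroring how struct pci_dev embeds struct device in the kernel. */
struct device  { const char *init_name; };
struct pci_dev { unsigned short vendor, device; struct device dev; };

/* The kernel's to_pci_dev() performs this same container_of step. */
static struct pci_dev *to_pci_dev(struct device *dev)
{
    return container_of(dev, struct pci_dev, dev);
}

int main(void)
{
    struct pci_dev pdev = { .vendor = 0x8086, .device = 0x1234,
                            .dev.init_name = "0000:00:02.0" };
    struct device *generic = &pdev.dev; /* what drm_device.dev stores */

    /* Recover the PCI view on demand, as the converted call sites do. */
    printf("%s -> %04x:%04x\n", generic->init_name,
           (unsigned)to_pci_dev(generic)->vendor,
           (unsigned)to_pci_dev(generic)->device);
    return 0;
}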
@@ -677,7 +677,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
@ -38,7 +38,6 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vga_switcheroo.h>
|
||||
#include <linux/vt.h>
|
||||
#include <acpi/video.h>
|
||||
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_ioctl.h>
|
||||
@ -47,11 +46,9 @@
|
||||
#include <drm/drm_probe_helper.h>
|
||||
|
||||
#include "display/intel_acpi.h"
|
||||
#include "display/intel_audio.h"
|
||||
#include "display/intel_bw.h"
|
||||
#include "display/intel_cdclk.h"
|
||||
#include "display/intel_csr.h"
|
||||
#include "display/intel_display_debugfs.h"
|
||||
#include "display/intel_display_types.h"
|
||||
#include "display/intel_dp.h"
|
||||
#include "display/intel_fbdev.h"
|
||||
@ -93,7 +90,7 @@ static const struct drm_driver driver;
|
||||
|
||||
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
|
||||
int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
|
||||
|
||||
dev_priv->bridge_dev =
|
||||
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
|
||||
@ -352,7 +349,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
|
||||
intel_irq_init(dev_priv);
|
||||
intel_init_display_hooks(dev_priv);
|
||||
intel_init_clock_gating_hooks(dev_priv);
|
||||
intel_init_audio_hooks(dev_priv);
|
||||
|
||||
intel_detect_preproduction_hw(dev_priv);
|
||||
|
||||
@ -461,7 +457,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
static int i915_set_dma_info(struct drm_i915_private *i915)
|
||||
{
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
|
||||
int ret;
|
||||
|
||||
@ -471,9 +466,9 @@ static int i915_set_dma_info(struct drm_i915_private *i915)
|
||||
* We don't have a max segment size, so set it to the max so sg's
|
||||
* debugging layer doesn't complain
|
||||
*/
|
||||
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
|
||||
dma_set_max_seg_size(i915->drm.dev, UINT_MAX);
|
||||
|
||||
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
|
||||
ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
|
||||
if (ret)
|
||||
goto mask_err;
|
||||
|
||||
@ -493,7 +488,7 @@ static int i915_set_dma_info(struct drm_i915_private *i915)
|
||||
if (IS_I965G(i915) || IS_I965GM(i915))
|
||||
mask_size = 32;
|
||||
|
||||
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
|
||||
ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
|
||||
if (ret)
|
||||
goto mask_err;
|
||||
|
||||
@ -513,7 +508,7 @@ mask_err:
|
||||
*/
|
||||
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
int ret;
|
||||
|
||||
if (i915_inject_probe_failure(dev_priv))
|
||||
@ -641,7 +636,7 @@ err_perf:
|
||||
*/
|
||||
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
|
||||
i915_perf_fini(dev_priv);
|
||||
|
||||
@ -666,43 +661,21 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
|
||||
intel_vgpu_register(dev_priv);
|
||||
|
||||
/* Reveal our presence to userspace */
|
||||
if (drm_dev_register(dev, 0) == 0) {
|
||||
i915_debugfs_register(dev_priv);
|
||||
if (HAS_DISPLAY(dev_priv))
|
||||
intel_display_debugfs_register(dev_priv);
|
||||
i915_setup_sysfs(dev_priv);
|
||||
|
||||
/* Depends on sysfs having been initialized */
|
||||
i915_perf_register(dev_priv);
|
||||
} else
|
||||
if (drm_dev_register(dev, 0)) {
|
||||
drm_err(&dev_priv->drm,
|
||||
"Failed to register driver for userspace access!\n");
|
||||
|
||||
if (HAS_DISPLAY(dev_priv)) {
|
||||
/* Must be done after probing outputs */
|
||||
intel_opregion_register(dev_priv);
|
||||
acpi_video_register();
|
||||
return;
|
||||
}
|
||||
|
||||
i915_debugfs_register(dev_priv);
|
||||
i915_setup_sysfs(dev_priv);
|
||||
|
||||
/* Depends on sysfs having been initialized */
|
||||
i915_perf_register(dev_priv);
|
||||
|
||||
intel_gt_driver_register(&dev_priv->gt);
|
||||
|
||||
intel_audio_init(dev_priv);
|
||||
|
||||
/*
|
||||
* Some ports require correctly set-up hpd registers for detection to
|
||||
* work properly (leading to ghost connected connector status), e.g. VGA
|
||||
* on gm45. Hence we can only set up the initial fbdev config after hpd
|
||||
* irqs are fully enabled. We do it last so that the async config
|
||||
* cannot run before the connectors are registered.
|
||||
*/
|
||||
intel_fbdev_initial_config_async(dev);
|
||||
|
||||
/*
|
||||
* We need to coordinate the hotplugs with the asynchronous fbdev
|
||||
* configuration, for which we use the fbdev->async_cookie.
|
||||
*/
|
||||
if (HAS_DISPLAY(dev_priv))
|
||||
drm_kms_helper_poll_init(dev);
|
||||
intel_display_driver_register(dev_priv);
|
||||
|
||||
intel_power_domains_enable(dev_priv);
|
||||
intel_runtime_pm_enable(&dev_priv->runtime_pm);
|
||||
@ -726,20 +699,9 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
|
||||
intel_runtime_pm_disable(&dev_priv->runtime_pm);
|
||||
intel_power_domains_disable(dev_priv);
|
||||
|
||||
intel_fbdev_unregister(dev_priv);
|
||||
intel_audio_deinit(dev_priv);
|
||||
|
||||
/*
|
||||
* After flushing the fbdev (incl. a late async config which will
|
||||
* have delayed queuing of a hotplug event), then flush the hotplug
|
||||
* events.
|
||||
*/
|
||||
drm_kms_helper_poll_fini(&dev_priv->drm);
|
||||
drm_atomic_helper_shutdown(&dev_priv->drm);
|
||||
intel_display_driver_unregister(dev_priv);
|
||||
|
||||
intel_gt_driver_unregister(&dev_priv->gt);
|
||||
acpi_video_unregister();
|
||||
intel_opregion_unregister(dev_priv);
|
||||
|
||||
i915_perf_unregister(dev_priv);
|
||||
i915_pmu_unregister(dev_priv);
|
||||
@ -1049,6 +1011,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
|
||||
void i915_driver_shutdown(struct drm_i915_private *i915)
|
||||
{
|
||||
disable_rpm_wakeref_asserts(&i915->runtime_pm);
|
||||
intel_runtime_pm_disable(&i915->runtime_pm);
|
||||
intel_power_domains_disable(i915);
|
||||
|
||||
i915_gem_suspend(i915);
|
||||
|
||||
@ -1064,7 +1028,15 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
|
||||
intel_suspend_encoders(i915);
|
||||
intel_shutdown_encoders(i915);
|
||||
|
||||
/*
|
||||
* The only requirement is to reboot with display DC states disabled,
|
||||
* for now leaving all display power wells in the INIT power domain
|
||||
* enabled matching the driver reload sequence.
|
||||
*/
|
||||
intel_power_domains_driver_remove(i915);
|
||||
enable_rpm_wakeref_asserts(&i915->runtime_pm);
|
||||
|
||||
intel_runtime_pm_driver_release(&i915->runtime_pm);
|
||||
}
|
||||
|
||||
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
|
||||
@ -1094,7 +1066,7 @@ static int i915_drm_prepare(struct drm_device *dev)
|
||||
static int i915_drm_suspend(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
pci_power_t opregion_target_state;
|
||||
|
||||
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
|
||||
@ -1151,7 +1123,7 @@ get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
|
||||
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
|
||||
int ret;
|
||||
|
||||
@ -1281,7 +1253,7 @@ static int i915_drm_resume(struct drm_device *dev)
|
||||
static int i915_drm_resume_early(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
int ret;
|
||||
|
||||
/*
|
||||
|
@ -475,42 +475,6 @@ struct i915_drrs {
|
||||
enum drrs_support_type type;
|
||||
};
|
||||
|
||||
struct i915_psr {
|
||||
struct mutex lock;
|
||||
|
||||
#define I915_PSR_DEBUG_MODE_MASK 0x0f
|
||||
#define I915_PSR_DEBUG_DEFAULT 0x00
|
||||
#define I915_PSR_DEBUG_DISABLE 0x01
|
||||
#define I915_PSR_DEBUG_ENABLE 0x02
|
||||
#define I915_PSR_DEBUG_FORCE_PSR1 0x03
|
||||
#define I915_PSR_DEBUG_IRQ 0x10
|
||||
|
||||
u32 debug;
|
||||
bool sink_support;
|
||||
bool enabled;
|
||||
struct intel_dp *dp;
|
||||
enum pipe pipe;
|
||||
enum transcoder transcoder;
|
||||
bool active;
|
||||
struct work_struct work;
|
||||
unsigned busy_frontbuffer_bits;
|
||||
bool sink_psr2_support;
|
||||
bool link_standby;
|
||||
bool colorimetry_support;
|
||||
bool psr2_enabled;
|
||||
bool psr2_sel_fetch_enabled;
|
||||
u8 sink_sync_latency;
|
||||
ktime_t last_entry_attempt;
|
||||
ktime_t last_exit;
|
||||
bool sink_not_reliable;
|
||||
bool irq_aux_error;
|
||||
u16 su_x_granularity;
|
||||
bool dc3co_enabled;
|
||||
u32 dc3co_exit_delay;
|
||||
struct delayed_work dc3co_work;
|
||||
struct drm_dp_vsc_sdp vsc;
|
||||
};
|
||||
|
||||
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
|
||||
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
|
||||
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
|
||||
@ -1038,8 +1002,6 @@ struct drm_i915_private {
|
||||
|
||||
struct i915_power_domains power_domains;
|
||||
|
||||
struct i915_psr psr;
|
||||
|
||||
struct i915_gpu_error gpu_error;
|
||||
|
||||
struct drm_i915_gem_object *vlv_pctx;
|
||||
@@ -1133,7 +1095,9 @@ struct drm_i915_private {
INTEL_DRAM_DDR3,
INTEL_DRAM_DDR4,
INTEL_DRAM_LPDDR3,
INTEL_DRAM_LPDDR4
INTEL_DRAM_LPDDR4,
INTEL_DRAM_DDR5,
INTEL_DRAM_LPDDR5,
} type;
u8 num_qgv_points;
} dram_info;
@ -1280,7 +1244,7 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
|
||||
#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
|
||||
|
||||
#define REVID_FOREVER 0xff
|
||||
#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
|
||||
#define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)
|
||||
|
||||
#define INTEL_GEN_MASK(s, e) ( \
|
||||
BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
|
||||
@ -1408,6 +1372,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
|
||||
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
|
||||
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
|
||||
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
|
||||
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
|
||||
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
|
||||
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
|
||||
#define IS_BDW_ULT(dev_priv) \
|
||||
@ -1550,54 +1515,60 @@ extern const struct i915_rev_steppings kbl_revids[];
|
||||
(IS_JSL_EHL(p) && IS_REVID(p, since, until))
|
||||
|
||||
enum {
|
||||
TGL_REVID_A0,
|
||||
TGL_REVID_B0,
|
||||
TGL_REVID_B1,
|
||||
TGL_REVID_C0,
|
||||
TGL_REVID_D0,
|
||||
STEP_A0,
|
||||
STEP_A2,
|
||||
STEP_B0,
|
||||
STEP_B1,
|
||||
STEP_C0,
|
||||
STEP_D0,
|
||||
};
|
||||
|
||||
#define TGL_UY_REVIDS_SIZE 4
|
||||
#define TGL_REVIDS_SIZE 2
|
||||
#define TGL_UY_REVID_STEP_TBL_SIZE 4
|
||||
#define TGL_REVID_STEP_TBL_SIZE 2
|
||||
#define ADLS_REVID_STEP_TBL_SIZE 13
|
||||
|
||||
extern const struct i915_rev_steppings tgl_uy_revids[TGL_UY_REVIDS_SIZE];
|
||||
extern const struct i915_rev_steppings tgl_revids[TGL_REVIDS_SIZE];
|
||||
extern const struct i915_rev_steppings tgl_uy_revid_step_tbl[TGL_UY_REVID_STEP_TBL_SIZE];
|
||||
extern const struct i915_rev_steppings tgl_revid_step_tbl[TGL_REVID_STEP_TBL_SIZE];
|
||||
extern const struct i915_rev_steppings adls_revid_step_tbl[ADLS_REVID_STEP_TBL_SIZE];
|
||||
|
||||
static inline const struct i915_rev_steppings *
|
||||
tgl_revids_get(struct drm_i915_private *dev_priv)
|
||||
tgl_stepping_get(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
u8 revid = INTEL_REVID(dev_priv);
|
||||
u8 size;
|
||||
const struct i915_rev_steppings *tgl_revid_tbl;
|
||||
const struct i915_rev_steppings *revid_step_tbl;
|
||||
|
||||
if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
|
||||
tgl_revid_tbl = tgl_uy_revids;
|
||||
size = ARRAY_SIZE(tgl_uy_revids);
|
||||
if (IS_ALDERLAKE_S(dev_priv)) {
|
||||
revid_step_tbl = adls_revid_step_tbl;
|
||||
size = ARRAY_SIZE(adls_revid_step_tbl);
|
||||
} else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
|
||||
revid_step_tbl = tgl_uy_revid_step_tbl;
|
||||
size = ARRAY_SIZE(tgl_uy_revid_step_tbl);
|
||||
} else {
|
||||
tgl_revid_tbl = tgl_revids;
|
||||
size = ARRAY_SIZE(tgl_revids);
|
||||
revid_step_tbl = tgl_revid_step_tbl;
|
||||
size = ARRAY_SIZE(tgl_revid_step_tbl);
|
||||
}
|
||||
|
||||
revid = min_t(u8, revid, size - 1);
|
||||
|
||||
return &tgl_revid_tbl[revid];
|
||||
return &revid_step_tbl[revid];
|
||||
}
|
||||
|
||||
#define IS_TGL_DISP_REVID(p, since, until) \
|
||||
#define IS_TGL_DISP_STEPPING(p, since, until) \
|
||||
(IS_TIGERLAKE(p) && \
|
||||
tgl_revids_get(p)->disp_stepping >= (since) && \
|
||||
tgl_revids_get(p)->disp_stepping <= (until))
|
||||
tgl_stepping_get(p)->disp_stepping >= (since) && \
|
||||
tgl_stepping_get(p)->disp_stepping <= (until))
|
||||
|
||||
#define IS_TGL_UY_GT_REVID(p, since, until) \
|
||||
#define IS_TGL_UY_GT_STEPPING(p, since, until) \
|
||||
((IS_TGL_U(p) || IS_TGL_Y(p)) && \
|
||||
tgl_revids_get(p)->gt_stepping >= (since) && \
|
||||
tgl_revids_get(p)->gt_stepping <= (until))
|
||||
tgl_stepping_get(p)->gt_stepping >= (since) && \
|
||||
tgl_stepping_get(p)->gt_stepping <= (until))
|
||||
|
||||
#define IS_TGL_GT_REVID(p, since, until) \
|
||||
#define IS_TGL_GT_STEPPING(p, since, until) \
|
||||
(IS_TIGERLAKE(p) && \
|
||||
!(IS_TGL_U(p) || IS_TGL_Y(p)) && \
|
||||
tgl_revids_get(p)->gt_stepping >= (since) && \
|
||||
tgl_revids_get(p)->gt_stepping <= (until))
|
||||
tgl_stepping_get(p)->gt_stepping >= (since) && \
|
||||
tgl_stepping_get(p)->gt_stepping <= (until))
|
||||
|
||||
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1
@@ -1612,6 +1583,22 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define IS_DG1_REVID(p, since, until) \
(IS_DG1(p) && IS_REVID(p, since, until))

#define ADLS_REVID_A0 0x0
#define ADLS_REVID_A2 0x1
#define ADLS_REVID_B0 0x4
#define ADLS_REVID_G0 0x8
#define ADLS_REVID_C0 0xC /*Same as H0 ADLS SOC stepping*/

#define IS_ADLS_DISP_STEPPING(p, since, until) \
(IS_ALDERLAKE_S(p) && \
tgl_stepping_get(p)->disp_stepping >= (since) && \
tgl_stepping_get(p)->disp_stepping <= (until))

#define IS_ADLS_GT_STEPPING(p, since, until) \
(IS_ALDERLAKE_S(p) && \
tgl_stepping_get(p)->gt_stepping >= (since) && \
tgl_stepping_get(p)->gt_stepping <= (until))
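Aside: the stepping macros above are backed by per-platform revision tables (adls_revid_step_tbl, tgl_uy_revid_step_tbl, ...) that tgl_stepping_get() indexes after clamping the PCI revision ID to the table size. A standalone sketch of that lookup shape, with a made-up table purely for illustration:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Illustrative table in the spirit of adls_revid_step_tbl: index is the PCI
 * revision ID; the stepping values here are demo numbers, not real data.
 * Gaps (0x2, 0x3) default to zero-initialised entries in this demo. */
struct rev_steppings { int gt_stepping, disp_stepping; };

static const struct rev_steppings demo_step_tbl[] = {
    [0x0] = { 0 /* A0 */, 0 /* A0 */ },
    [0x1] = { 1 /* A2 */, 1 /* A2 */ },
    [0x4] = { 2 /* B0 */, 2 /* B0 */ },
};

#define TBL_SIZE (sizeof(demo_step_tbl) / sizeof(demo_step_tbl[0]))

/* Same shape as tgl_stepping_get(): clamp the revision to the table so an
 * unexpectedly new part still resolves to the last known stepping. */
static const struct rev_steppings *stepping_get(unsigned int revid)
{
    return &demo_step_tbl[MIN(revid, TBL_SIZE - 1)];
}

int main(void)
{
    printf("revid 0x1 -> disp stepping %d\n", stepping_get(0x1)->disp_stepping);
    printf("revid 0x9 -> disp stepping %d (clamped)\n",
           stepping_get(0x9)->disp_stepping);
    return 0;
}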
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
|
||||
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
|
||||
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
|
||||
@ -1703,7 +1690,7 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
|
||||
#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
|
||||
|
||||
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
|
||||
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
|
||||
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
|
||||
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
|
||||
#define HAS_PSR_HW_TRACKING(dev_priv) \
|
||||
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
|
||||
@ -1718,6 +1705,8 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
|
||||
|
||||
#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
|
||||
|
||||
#define HAS_MSO(i915) (INTEL_GEN(i915) >= 12)
|
||||
|
||||
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
|
||||
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
|
||||
|
||||
@ -1735,7 +1724,7 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
|
||||
|
||||
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
|
||||
|
||||
#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
|
||||
#define HAS_LSPCON(dev_priv) (IS_GEN_RANGE(dev_priv, 9, 10))
|
||||
|
||||
/* DPF == dynamic parity feature */
|
||||
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
|
||||
@ -1760,6 +1749,9 @@ static inline bool run_as_guest(void)
|
||||
return !hypervisor_is_type(X86_HYPER_NATIVE);
|
||||
}
|
||||
|
||||
#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
|
||||
IS_ALDERLAKE_S(dev_priv))
|
||||
|
||||
static inline bool intel_vtd_active(void)
|
||||
{
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
|
@ -28,7 +28,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
|
||||
struct sg_table *pages)
|
||||
{
|
||||
do {
|
||||
if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
|
||||
if (dma_map_sg_attrs(obj->base.dev->dev,
|
||||
pages->sgl, pages->nents,
|
||||
PCI_DMA_BIDIRECTIONAL,
|
||||
DMA_ATTR_SKIP_CPU_SYNC |
|
||||
@ -63,8 +63,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
|
||||
/* Wait a bit, in the hope it avoids the hang */
|
||||
usleep_range(100, 250);
|
||||
|
||||
dma_unmap_sg(&i915->drm.pdev->dev,
|
||||
pages->sgl, pages->nents,
|
||||
dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
}
|
||||
|
||||
|
@ -12,6 +12,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dev);
|
||||
struct pci_dev *pdev = to_pci_dev(dev->dev);
|
||||
const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
|
||||
drm_i915_getparam_t *param = data;
|
||||
int value;
|
||||
@ -24,10 +25,10 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
|
||||
/* Reject all old ums/dri params. */
|
||||
return -ENODEV;
|
||||
case I915_PARAM_CHIPSET_ID:
|
||||
value = i915->drm.pdev->device;
|
||||
value = pdev->device;
|
||||
break;
|
||||
case I915_PARAM_REVISION:
|
||||
value = i915->drm.pdev->revision;
|
||||
value = pdev->revision;
|
||||
break;
|
||||
case I915_PARAM_NUM_FENCES_AVAIL:
|
||||
value = i915->ggtt.num_fences;
|
||||
|
@ -644,7 +644,7 @@ static void err_print_params(struct drm_i915_error_state_buf *m,
|
||||
static void err_print_pciid(struct drm_i915_error_state_buf *m,
|
||||
struct drm_i915_private *i915)
|
||||
{
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
|
||||
err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
|
||||
err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
|
||||
|
@ -209,8 +209,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
|
||||
|
||||
if (HAS_PCH_DG1(dev_priv))
|
||||
hpd->pch_hpd = hpd_sde_dg1;
|
||||
else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
|
||||
HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
|
||||
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
|
||||
hpd->pch_hpd = hpd_icp;
|
||||
else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
|
||||
hpd->pch_hpd = hpd_spt;
|
||||
@ -795,7 +794,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
|
||||
int position, vtotal;
|
||||
|
||||
if (!crtc->active)
|
||||
return -1;
|
||||
return 0;
|
||||
|
||||
vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
|
||||
mode = &vblank->hwmode;
|
||||
@ -2095,10 +2094,19 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
|
||||
ivb_err_int_handler(dev_priv);
|
||||
|
||||
if (de_iir & DE_EDP_PSR_INT_HSW) {
|
||||
u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
intel_psr_irq_handler(dev_priv, psr_iir);
|
||||
intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
|
||||
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
u32 psr_iir = intel_uncore_read(&dev_priv->uncore,
|
||||
EDP_PSR_IIR);
|
||||
|
||||
intel_psr_irq_handler(intel_dp, psr_iir);
|
||||
intel_uncore_write(&dev_priv->uncore,
|
||||
EDP_PSR_IIR, psr_iir);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (de_iir & DE_AUX_CHANNEL_A_IVB)
|
||||
@ -2290,7 +2298,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
|
||||
|
||||
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (IS_ROCKETLAKE(dev_priv))
|
||||
if (HAS_D12_PLANE_MINIMIZATION(dev_priv))
|
||||
return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
else if (INTEL_GEN(dev_priv) >= 11)
|
||||
return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
@ -2311,21 +2319,30 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
|
||||
}
|
||||
|
||||
if (iir & GEN8_DE_EDP_PSR) {
|
||||
struct intel_encoder *encoder;
|
||||
u32 psr_iir;
|
||||
i915_reg_t iir_reg;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 12)
|
||||
iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
|
||||
else
|
||||
iir_reg = EDP_PSR_IIR;
|
||||
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
|
||||
intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
|
||||
if (INTEL_GEN(dev_priv) >= 12)
|
||||
iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
|
||||
else
|
||||
iir_reg = EDP_PSR_IIR;
|
||||
|
||||
if (psr_iir)
|
||||
found = true;
|
||||
psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
|
||||
intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
|
||||
|
||||
intel_psr_irq_handler(dev_priv, psr_iir);
|
||||
if (psr_iir)
|
||||
found = true;
|
||||
|
||||
intel_psr_irq_handler(intel_dp, psr_iir);
|
||||
|
||||
/* prior GEN12 only have one EDP PSR */
|
||||
if (INTEL_GEN(dev_priv) < 12)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found)
|
||||
@@ -3023,6 +3040,24 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}

static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;

/*
* Wa_14010685332:cnp/cmp,tgp,adp
* TODO: Clarify which platforms this applies to
* TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
* on earlier platforms and whether the workaround is also needed for runtime suspend/resume
*/
if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
(INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
SBCLK_RUN_REFCLK_DIS);
intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3046,6 +3081,8 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)

if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);

cnp_display_clock_wa(dev_priv);
}
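Aside: Wa_14010685332 above is a pulse on a single chicken-register bit, expressed as two read-modify-write calls (set the bit, then clear it again). A tiny standalone model of that rmw(clear, set) semantic over a fake register; the bit position shown is illustrative only:

#include <stdint.h>
#include <stdio.h>

#define SBCLK_RUN_REFCLK_DIS (1u << 7) /* illustrative bit position only */

/* Minimal read-modify-write helper over a fake register, mirroring the
 * clear/set argument order of the uncore rmw calls in the hunk above. */
static void rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
    *reg = (*reg & ~clear) | set;
    printf("reg = 0x%08x\n", (unsigned)*reg);
}

int main(void)
{
    uint32_t south_chicken1 = 0;

    /* The workaround is a pulse: assert the bit, then deassert it. */
    rmw(&south_chicken1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
    rmw(&south_chicken1, SBCLK_RUN_REFCLK_DIS, 0);
    return 0;
}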
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
|
||||
@ -3087,15 +3124,7 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
|
||||
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
|
||||
GEN3_IRQ_RESET(uncore, SDE);
|
||||
|
||||
/* Wa_14010685332:cnp/cmp,tgp,adp */
|
||||
if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
|
||||
(INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
|
||||
INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
|
||||
intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
|
||||
SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
|
||||
intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
|
||||
SBCLK_RUN_REFCLK_DIS, 0);
|
||||
}
|
||||
cnp_display_clock_wa(dev_priv);
|
||||
}
|
||||
|
||||
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
|
||||
@ -3747,9 +3776,19 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
}
|
||||
}
|
||||
|
||||
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
u32 mask = SDE_GMBUS_ICP;
|
||||
|
||||
GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
|
||||
}
|
||||
|
||||
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (HAS_PCH_SPLIT(dev_priv))
|
||||
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
|
||||
icp_irq_postinstall(dev_priv);
|
||||
else if (HAS_PCH_SPLIT(dev_priv))
|
||||
ibx_irq_postinstall(dev_priv);
|
||||
|
||||
gen8_gt_irq_postinstall(&dev_priv->gt);
|
||||
@ -3758,13 +3797,6 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
gen8_master_intr_enable(dev_priv->uncore.regs);
|
||||
}
|
||||
|
||||
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
u32 mask = SDE_GMBUS_ICP;
|
||||
|
||||
GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
|
||||
}
|
||||
|
||||
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
@ -4287,6 +4319,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
|
||||
dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
|
||||
else if (IS_GEN9_LP(dev_priv))
|
||||
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
|
||||
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
|
||||
dev_priv->display.hpd_irq_setup = icp_hpd_irq_setup;
|
||||
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
|
||||
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
|
||||
else
|
||||
@ -4392,7 +4426,7 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
int intel_irq_install(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int irq = dev_priv->drm.pdev->irq;
|
||||
int irq = to_pci_dev(dev_priv->drm.dev)->irq;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
@ -4427,7 +4461,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int irq = dev_priv->drm.pdev->irq;
|
||||
int irq = to_pci_dev(dev_priv->drm.dev)->irq;
|
||||
|
||||
/*
|
||||
* FIXME we can get called twice during driver probe
|
||||
@ -4487,5 +4521,5 @@ bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
|
||||
|
||||
void intel_synchronize_irq(struct drm_i915_private *i915)
|
||||
{
|
||||
synchronize_irq(i915->drm.pdev->irq);
|
||||
synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
|
||||
}
|
||||
|
@@ -54,8 +54,8 @@ struct drm_printer;
param(int, enable_dc, -1, 0400) \
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
param(bool, psr_safest_params, false, 0600) \
param(bool, enable_psr2_sel_fetch, false, 0600) \
param(bool, psr_safest_params, false, 0400) \
param(bool, enable_psr2_sel_fetch, false, 0400) \
param(int, disable_power_well, -1, 0400) \
param(int, enable_ips, 1, 0600) \
param(int, invert_brightness, 0, 0600) \
@ -538,7 +538,7 @@ static const struct intel_device_info vlv_info = {
|
||||
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
|
||||
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
|
||||
.display.has_ddi = 1, \
|
||||
.has_fpga_dbg = 1, \
|
||||
.display.has_fpga_dbg = 1, \
|
||||
.display.has_psr = 1, \
|
||||
.display.has_psr_hw_tracking = 1, \
|
||||
.display.has_dp_mst = 1, \
|
||||
@ -689,7 +689,7 @@ static const struct intel_device_info skl_gt4_info = {
|
||||
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
|
||||
.has_64bit_reloc = 1, \
|
||||
.display.has_ddi = 1, \
|
||||
.has_fpga_dbg = 1, \
|
||||
.display.has_fpga_dbg = 1, \
|
||||
.display.has_fbc = 1, \
|
||||
.display.has_hdcp = 1, \
|
||||
.display.has_psr = 1, \
|
||||
@@ -897,7 +897,6 @@ static const struct intel_device_info rkl_info = {
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C),
.require_force_probe = 1,
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
.platform_engine_mask =
@@ -924,6 +923,18 @@ static const struct intel_device_info dg1_info __maybe_unused = {
.ppgtt_size = 47,
};

static const struct intel_device_info adl_s_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ALDERLAKE_S),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.require_force_probe = 1,
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
.dma_mask_size = 46,
};

#undef GEN
#undef PLATFORM

@@ -1000,6 +1011,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_JSL_IDS(&jsl_info),
INTEL_TGL_12_IDS(&tgl_info),
INTEL_RKL_IDS(&rkl_info),
INTEL_ADLS_IDS(&adl_s_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@ -302,7 +302,7 @@ static u32 i915_oa_max_sample_rate = 100000;
|
||||
* code assumes all reports have a power-of-two size and ~(size - 1) can
|
||||
* be used as a mask to align the OA tail pointer.
|
||||
*/
|
||||
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
|
||||
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
|
||||
[I915_OA_FORMAT_A13] = { 0, 64 },
|
||||
[I915_OA_FORMAT_A29] = { 1, 128 },
|
||||
[I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
|
||||
@ -311,17 +311,9 @@ static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
|
||||
[I915_OA_FORMAT_A45_B8_C8] = { 5, 256 },
|
||||
[I915_OA_FORMAT_B4_C8_A16] = { 6, 128 },
|
||||
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
|
||||
};
|
||||
|
||||
static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
|
||||
[I915_OA_FORMAT_A12] = { 0, 64 },
|
||||
[I915_OA_FORMAT_A12_B8_C8] = { 2, 128 },
|
||||
[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
|
||||
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
|
||||
};
|
||||
|
||||
static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
|
||||
[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
|
||||
};
|
||||
|
||||
#define SAMPLE_OA_REPORT (1<<0)
|
||||
@ -603,7 +595,6 @@ static int append_oa_sample(struct i915_perf_stream *stream,
|
||||
{
|
||||
int report_size = stream->oa_buffer.format_size;
|
||||
struct drm_i915_perf_record_header header;
|
||||
u32 sample_flags = stream->sample_flags;
|
||||
|
||||
header.type = DRM_I915_PERF_RECORD_SAMPLE;
|
||||
header.pad = 0;
|
||||
@ -617,10 +608,8 @@ static int append_oa_sample(struct i915_perf_stream *stream,
|
||||
return -EFAULT;
|
||||
buf += sizeof(header);
|
||||
|
||||
if (sample_flags & SAMPLE_OA_REPORT) {
|
||||
if (copy_to_user(buf, report, report_size))
|
||||
return -EFAULT;
|
||||
}
|
||||
if (copy_to_user(buf, report, report_size))
|
||||
return -EFAULT;
|
||||
|
||||
(*offset) += header.size;
|
||||
|
||||
@ -733,11 +722,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
|
||||
(IS_GEN(stream->perf->i915, 12) ?
|
||||
OAREPORT_REASON_MASK_EXTENDED :
|
||||
OAREPORT_REASON_MASK));
|
||||
if (reason == 0) {
|
||||
if (__ratelimit(&stream->perf->spurious_report_rs))
|
||||
DRM_NOTE("Skipping spurious, invalid OA report\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
ctx_id = report32[2] & stream->specific_ctx_id_mask;
|
||||
|
||||
@ -2682,7 +2666,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
|
||||
|
||||
stream->perf->ops.oa_enable(stream);
|
||||
|
||||
if (stream->periodic)
|
||||
if (stream->sample_flags & SAMPLE_OA_REPORT)
|
||||
hrtimer_start(&stream->poll_check_timer,
|
||||
ns_to_ktime(stream->poll_oa_period),
|
||||
HRTIMER_MODE_REL_PINNED);
|
||||
@ -2745,7 +2729,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
|
||||
{
|
||||
stream->perf->ops.oa_disable(stream);
|
||||
|
||||
if (stream->periodic)
|
||||
if (stream->sample_flags & SAMPLE_OA_REPORT)
|
||||
hrtimer_cancel(&stream->poll_check_timer);
|
||||
}
|
||||
|
||||
@ -3028,7 +3012,7 @@ static ssize_t i915_perf_read(struct file *file,
|
||||
* disabled stream as an error. In particular it might otherwise lead
|
||||
* to a deadlock for blocking file descriptors...
|
||||
*/
|
||||
if (!stream->enabled)
|
||||
if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
|
||||
return -EIO;
|
||||
|
||||
if (!(file->f_flags & O_NONBLOCK)) {
|
||||
@ -3524,6 +3508,18 @@ static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
|
||||
2ULL << exponent);
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
|
||||
{
|
||||
return test_bit(format, perf->format_mask);
|
||||
}
|
||||
|
||||
static __always_inline void
|
||||
oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
|
||||
{
|
||||
__set_bit(format, perf->format_mask);
|
||||
}
|
||||
|
||||
/**
|
||||
* read_properties_unlocked - validate + copy userspace stream open properties
|
||||
* @perf: i915 perf instance
|
||||
@ -3615,7 +3611,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
|
||||
value);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!perf->oa_formats[value].size) {
|
||||
if (!oa_format_valid(perf, value)) {
|
||||
DRM_DEBUG("Unsupported OA report format %llu\n",
|
||||
value);
|
||||
return -EINVAL;
|
||||
@ -4259,6 +4255,50 @@ static struct ctl_table dev_root[] = {
|
||||
{}
|
||||
};
|
||||
|
||||
static void oa_init_supported_formats(struct i915_perf *perf)
|
||||
{
|
||||
struct drm_i915_private *i915 = perf->i915;
|
||||
enum intel_platform platform = INTEL_INFO(i915)->platform;
|
||||
|
||||
switch (platform) {
|
||||
case INTEL_HASWELL:
|
||||
oa_format_add(perf, I915_OA_FORMAT_A13);
|
||||
oa_format_add(perf, I915_OA_FORMAT_A13);
|
||||
oa_format_add(perf, I915_OA_FORMAT_A29);
|
||||
oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
|
||||
oa_format_add(perf, I915_OA_FORMAT_B4_C8);
|
||||
oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
|
||||
oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
|
||||
oa_format_add(perf, I915_OA_FORMAT_C4_B8);
|
||||
break;
|
||||
|
||||
case INTEL_BROADWELL:
|
||||
case INTEL_CHERRYVIEW:
|
||||
case INTEL_SKYLAKE:
|
||||
case INTEL_BROXTON:
|
||||
case INTEL_KABYLAKE:
|
||||
case INTEL_GEMINILAKE:
|
||||
case INTEL_COFFEELAKE:
|
||||
case INTEL_COMETLAKE:
|
||||
case INTEL_CANNONLAKE:
|
||||
case INTEL_ICELAKE:
|
||||
case INTEL_ELKHARTLAKE:
|
||||
case INTEL_JASPERLAKE:
|
||||
case INTEL_TIGERLAKE:
|
||||
case INTEL_ROCKETLAKE:
|
||||
case INTEL_DG1:
|
||||
case INTEL_ALDERLAKE_S:
|
||||
oa_format_add(perf, I915_OA_FORMAT_A12);
|
||||
oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
|
||||
oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
|
||||
oa_format_add(perf, I915_OA_FORMAT_C4_B8);
|
||||
break;
|
||||
|
||||
default:
|
||||
MISSING_CASE(platform);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_perf_init - initialize i915-perf state on module bind
|
||||
* @i915: i915 device instance
|
||||
@ -4274,6 +4314,7 @@ void i915_perf_init(struct drm_i915_private *i915)
|
||||
|
||||
/* XXX const struct i915_perf_ops! */
|
||||
|
||||
perf->oa_formats = oa_formats;
|
||||
if (IS_HASWELL(i915)) {
|
||||
perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
|
||||
perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
|
||||
@ -4284,8 +4325,6 @@ void i915_perf_init(struct drm_i915_private *i915)
|
||||
perf->ops.oa_disable = gen7_oa_disable;
|
||||
perf->ops.read = gen7_oa_read;
|
||||
perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
|
||||
|
||||
perf->oa_formats = hsw_oa_formats;
|
||||
} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
|
||||
/* Note: that although we could theoretically also support the
|
||||
* legacy ringbuffer mode on BDW (and earlier iterations of
|
||||
@ -4296,8 +4335,6 @@ void i915_perf_init(struct drm_i915_private *i915)
|
||||
perf->ops.read = gen8_oa_read;
|
||||
|
||||
if (IS_GEN_RANGE(i915, 8, 9)) {
|
||||
perf->oa_formats = gen8_plus_oa_formats;
|
||||
|
||||
perf->ops.is_valid_b_counter_reg =
|
||||
gen7_is_valid_b_counter_addr;
|
||||
perf->ops.is_valid_mux_reg =
|
||||
@ -4328,8 +4365,6 @@ void i915_perf_init(struct drm_i915_private *i915)
|
||||
perf->gen8_valid_ctx_bit = BIT(16);
|
||||
}
|
||||
} else if (IS_GEN_RANGE(i915, 10, 11)) {
|
||||
perf->oa_formats = gen8_plus_oa_formats;
|
||||
|
||||
perf->ops.is_valid_b_counter_reg =
|
||||
gen7_is_valid_b_counter_addr;
|
||||
perf->ops.is_valid_mux_reg =
|
||||
@ -4352,8 +4387,6 @@ void i915_perf_init(struct drm_i915_private *i915)
|
||||
}
|
||||
perf->gen8_valid_ctx_bit = BIT(16);
|
||||
} else if (IS_GEN(i915, 12)) {
|
||||
perf->oa_formats = gen12_oa_formats;
|
||||
|
||||
perf->ops.is_valid_b_counter_reg =
|
||||
gen12_is_valid_b_counter_addr;
|
||||
perf->ops.is_valid_mux_reg =
|
||||
@ -4408,6 +4441,8 @@ void i915_perf_init(struct drm_i915_private *i915)
|
||||
500 * 1000 /* 500us */);
|
||||
|
||||
perf->i915 = i915;
|
||||
|
||||
oa_init_supported_formats(perf);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/wait.h>
#include <uapi/drm/i915_drm.h>

#include "gt/intel_sseu.h"
#include "i915_reg.h"
@@ -441,6 +442,13 @@ struct i915_perf {
struct i915_oa_ops ops;
const struct i915_oa_format *oa_formats;

/**
* Use a format mask to store the supported formats
* for a platform.
*/
#define FORMAT_MASK_SIZE DIV_ROUND_UP(I915_OA_FORMAT_MAX - 1, BITS_PER_LONG)
unsigned long format_mask[FORMAT_MASK_SIZE];

atomic64_t noa_programming_delay;
};
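Aside: the OA rework above replaces the per-generation format tables with one shared table plus a per-platform bitmap of supported formats (format_mask, filled by oa_format_add() and queried by oa_format_valid()). A compact userspace analogue of that bitmap, with illustrative format names standing in for the drm_i915_oa_format enum:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative format IDs only. */
enum oa_format { FMT_A12, FMT_A12_B8_C8, FMT_A32u40_A4u32_B8_C8, FMT_C4_B8, FMT_MAX };

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define FORMAT_MASK_SIZE DIV_ROUND_UP(FMT_MAX, BITS_PER_LONG)

static unsigned long format_mask[FORMAT_MASK_SIZE];

/* Same idea as oa_format_add()/oa_format_valid(): one bit per format. */
static void format_add(enum oa_format f)
{
    format_mask[f / BITS_PER_LONG] |= 1UL << (f % BITS_PER_LONG);
}

static bool format_valid(enum oa_format f)
{
    return format_mask[f / BITS_PER_LONG] & (1UL << (f % BITS_PER_LONG));
}

int main(void)
{
    format_add(FMT_A12);
    format_add(FMT_C4_B8);
    printf("A12 supported? %d\n", format_valid(FMT_A12));
    printf("A32u40_A4u32_B8_C8 supported? %d\n",
           format_valid(FMT_A32u40_A4u32_B8_C8));
    return 0;
}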
@ -1124,7 +1124,7 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
|
||||
|
||||
static bool is_igp(struct drm_i915_private *i915)
|
||||
{
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
|
||||
/* IGP is 0000:00:02.0 */
|
||||
return pci_domain_nr(pdev->bus) == 0 &&
|
||||
|
@ -1874,10 +1874,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
#define _ICL_COMBOPHY_B 0x6C000
|
||||
#define _EHL_COMBOPHY_C 0x160000
|
||||
#define _RKL_COMBOPHY_D 0x161000
|
||||
#define _ADL_COMBOPHY_E 0x16B000
|
||||
|
||||
#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \
|
||||
_ICL_COMBOPHY_B, \
|
||||
_EHL_COMBOPHY_C, \
|
||||
_RKL_COMBOPHY_D)
|
||||
_RKL_COMBOPHY_D, \
|
||||
_ADL_COMBOPHY_E)
|
||||
|
||||
/* CNL/ICL Port CL_DW registers */
|
||||
#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
|
||||
@ -2927,7 +2930,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
|
||||
|
||||
#define HDPORT_STATE _MMIO(0x45050)
|
||||
#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12)
|
||||
#define HDPORT_DPLL_USED_MASK REG_GENMASK(15, 12)
|
||||
#define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1)
|
||||
#define HDPORT_ENABLED REG_BIT(0)
|
||||
|
||||
@ -3316,7 +3319,18 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
|
||||
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
|
||||
#define ILK_FBCQ_DIS (1 << 22)
|
||||
#define ILK_PABSTRETCH_DIS (1 << 21)
|
||||
#define ILK_PABSTRETCH_DIS REG_BIT(21)
|
||||
#define ILK_SABSTRETCH_DIS REG_BIT(20)
|
||||
#define IVB_PRI_STRETCH_MAX_MASK REG_GENMASK(21, 20)
|
||||
#define IVB_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 0)
|
||||
#define IVB_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 1)
|
||||
#define IVB_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 2)
|
||||
#define IVB_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 3)
|
||||
#define IVB_SPR_STRETCH_MAX_MASK REG_GENMASK(19, 18)
|
||||
#define IVB_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 0)
|
||||
#define IVB_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 1)
|
||||
#define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2)
|
||||
#define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3)
|
||||
|
||||
|
||||
/*
|
||||
@ -8039,6 +8053,16 @@ enum {
|
||||
|
||||
#define _CHICKEN_PIPESL_1_A 0x420b0
|
||||
#define _CHICKEN_PIPESL_1_B 0x420b4
|
||||
#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27)
|
||||
#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0)
|
||||
#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1)
|
||||
#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2)
|
||||
#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3)
|
||||
#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25)
|
||||
#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0)
|
||||
#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1)
|
||||
#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2)
|
||||
#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
|
||||
#define HSW_FBCQ_DIS (1 << 22)
|
||||
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
|
||||
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
|
||||
@ -10357,7 +10381,7 @@ enum skl_power_gate {
|
||||
|
||||
/* ICL Clocks */
|
||||
#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
|
||||
#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24))
|
||||
#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5))
|
||||
#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
|
||||
#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \
|
||||
(tc_port) + 12 : \
|
||||
@ -10392,14 +10416,38 @@ enum skl_power_gate {
|
||||
#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy) \
|
||||
(((clk_sel) >> DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) + _DG1_PHY_DPLL_MAP(phy))
|
||||
|
||||
/* ADLS Clocks */
|
||||
#define _ADLS_DPCLKA_CFGCR0 0x164280
|
||||
#define _ADLS_DPCLKA_CFGCR1 0x1642BC
|
||||
#define ADLS_DPCLKA_CFGCR(phy) _MMIO_PHY((phy) / 3, \
|
||||
_ADLS_DPCLKA_CFGCR0, \
|
||||
_ADLS_DPCLKA_CFGCR1)
|
||||
#define ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy) (((phy) % 3) * 2)
|
||||
/* ADLS DPCLKA_CFGCR0 DDI mask */
|
||||
#define ADLS_DPCLKA_DDII_SEL_MASK REG_GENMASK(5, 4)
|
||||
#define ADLS_DPCLKA_DDIB_SEL_MASK REG_GENMASK(3, 2)
|
||||
#define ADLS_DPCLKA_DDIA_SEL_MASK REG_GENMASK(1, 0)
|
||||
/* ADLS DPCLKA_CFGCR1 DDI mask */
|
||||
#define ADLS_DPCLKA_DDIK_SEL_MASK REG_GENMASK(3, 2)
|
||||
#define ADLS_DPCLKA_DDIJ_SEL_MASK REG_GENMASK(1, 0)
|
||||
#define ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy) _PICK((phy), \
|
||||
ADLS_DPCLKA_DDIA_SEL_MASK, \
|
||||
ADLS_DPCLKA_DDIB_SEL_MASK, \
|
||||
ADLS_DPCLKA_DDII_SEL_MASK, \
|
||||
ADLS_DPCLKA_DDIJ_SEL_MASK, \
|
||||
ADLS_DPCLKA_DDIK_SEL_MASK)
|
||||
|
||||
/* CNL PLL */
|
||||
#define DPLL0_ENABLE 0x46010
|
||||
#define DPLL1_ENABLE 0x46014
|
||||
#define _ADLS_DPLL2_ENABLE 0x46018
|
||||
#define _ADLS_DPLL3_ENABLE 0x46030
|
||||
#define PLL_ENABLE (1 << 31)
|
||||
#define PLL_LOCK (1 << 30)
|
||||
#define PLL_POWER_ENABLE (1 << 27)
|
||||
#define PLL_POWER_STATE (1 << 26)
|
||||
#define CNL_DPLL_ENABLE(pll) _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
|
||||
#define CNL_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
|
||||
_ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE)
|
||||
|
||||
#define TBT_PLL_ENABLE _MMIO(0x46020)
|
||||
|
||||
@ -10645,6 +10693,21 @@ enum skl_power_gate {
|
||||
_DG1_DPLL2_CFGCR1, \
|
||||
_DG1_DPLL3_CFGCR1)
|
||||
|
||||
/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */
|
||||
#define _ADLS_DPLL3_CFGCR0 0x1642C0
|
||||
#define _ADLS_DPLL4_CFGCR0 0x164294
|
||||
#define ADLS_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
|
||||
_TGL_DPLL1_CFGCR0, \
|
||||
_ADLS_DPLL4_CFGCR0, \
|
||||
_ADLS_DPLL3_CFGCR0)
|
||||
|
||||
#define _ADLS_DPLL3_CFGCR1 0x1642C4
|
||||
#define _ADLS_DPLL4_CFGCR1 0x164298
|
||||
#define ADLS_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
|
||||
_TGL_DPLL1_CFGCR1, \
|
||||
_ADLS_DPLL4_CFGCR1, \
|
||||
_ADLS_DPLL3_CFGCR1)
|
||||
|
||||
#define _DKL_PHY1_BASE 0x168000
|
||||
#define _DKL_PHY2_BASE 0x169000
|
||||
#define _DKL_PHY3_BASE 0x16A000
|
||||
@ -11406,6 +11469,9 @@ enum skl_power_gate {
|
||||
#define BIG_JOINER_ENABLE (1 << 29)
|
||||
#define MASTER_BIG_JOINER_ENABLE (1 << 28)
|
||||
#define VGA_CENTERING_ENABLE (1 << 27)
|
||||
#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
|
||||
#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
|
||||
#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
|
||||
|
||||
#define _ICL_PIPE_DSS_CTL2_PB 0x78204
|
||||
#define _ICL_PIPE_DSS_CTL2_PC 0x78404
|
||||
|
@ -85,7 +85,7 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv)
|
||||
|
||||
void i915_save_display(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
|
||||
/* Display arbitration control */
|
||||
if (INTEL_GEN(dev_priv) <= 4)
|
||||
@ -100,7 +100,7 @@ void i915_save_display(struct drm_i915_private *dev_priv)
|
||||
|
||||
void i915_restore_display(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
|
||||
intel_restore_swf(dev_priv);
|
||||
|
||||
|
@ -54,14 +54,14 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
|
||||
|
||||
int i915_switcheroo_register(struct drm_i915_private *i915)
|
||||
{
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
|
||||
return vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
|
||||
}
|
||||
|
||||
void i915_switcheroo_unregister(struct drm_i915_private *i915)
|
||||
{
|
||||
struct pci_dev *pdev = i915->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
|
||||
|
||||
vga_switcheroo_unregister_client(pdev);
|
||||
}
|
||||
|
@ -61,7 +61,7 @@
|
||||
*/
|
||||
void intel_vgpu_detect(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
u64 magic;
|
||||
u16 version_major;
|
||||
void __iomem *shared_area;
|
||||
|
@@ -66,6 +66,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(TIGERLAKE),
PLATFORM_NAME(ROCKETLAKE),
PLATFORM_NAME(DG1),
PLATFORM_NAME(ALDERLAKE_S),
};
#undef PLATFORM_NAME
@ -204,7 +205,7 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
|
||||
}
|
||||
|
||||
if (IS_TIGERLAKE(i915)) {
|
||||
struct pci_dev *root, *pdev = i915->drm.pdev;
|
||||
struct pci_dev *root, *pdev = to_pci_dev(i915->drm.dev);
|
||||
|
||||
root = list_first_entry(&pdev->bus->devices, typeof(*root), bus_list);
|
||||
|
||||
@ -249,7 +250,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
|
||||
struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
|
||||
enum pipe pipe;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 10) {
|
||||
/* Wa_14011765242: adl-s A0 */
|
||||
if (IS_ADLS_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0))
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
runtime->num_scalers[pipe] = 0;
|
||||
else if (INTEL_GEN(dev_priv) >= 10) {
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
runtime->num_scalers[pipe] = 2;
|
||||
} else if (IS_GEN(dev_priv, 9)) {
|
||||
@ -260,7 +265,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
|
||||
|
||||
BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
|
||||
|
||||
if (IS_ROCKETLAKE(dev_priv))
|
||||
if (HAS_D12_PLANE_MINIMIZATION(dev_priv))
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
runtime->num_sprites[pipe] = 4;
|
||||
else if (INTEL_GEN(dev_priv) >= 11)
|
||||
|
@ -84,6 +84,7 @@ enum intel_platform {
|
||||
INTEL_TIGERLAKE,
|
||||
INTEL_ROCKETLAKE,
|
||||
INTEL_DG1,
|
||||
INTEL_ALDERLAKE_S,
|
||||
INTEL_MAX_PLATFORMS
|
||||
};
|
||||
|
||||
@ -116,7 +117,6 @@ enum intel_ppgtt_type {
|
||||
func(has_64bit_reloc); \
|
||||
func(gpu_reset_clobbers_display); \
|
||||
func(has_reset_engine); \
|
||||
func(has_fpga_dbg); \
|
||||
func(has_global_mocs); \
|
||||
func(has_gt_uc); \
|
||||
func(has_l3_dpf); \
|
||||
@ -143,6 +143,7 @@ enum intel_ppgtt_type {
|
||||
func(has_dsb); \
|
||||
func(has_dsc); \
|
||||
func(has_fbc); \
|
||||
func(has_fpga_dbg); \
|
||||
func(has_gmch); \
|
||||
func(has_hdcp); \
|
||||
func(has_hotplug); \
|
||||
|
@ -427,6 +427,12 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
|
||||
case 0:
|
||||
dram_info->type = INTEL_DRAM_DDR4;
|
||||
break;
|
||||
case 1:
|
||||
dram_info->type = INTEL_DRAM_DDR5;
|
||||
break;
|
||||
case 2:
|
||||
dram_info->type = INTEL_DRAM_LPDDR5;
|
||||
break;
|
||||
case 3:
|
||||
dram_info->type = INTEL_DRAM_LPDDR4;
|
||||
break;
|
||||
|
@ -121,13 +121,18 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
|
||||
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
|
||||
drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
|
||||
drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) &&
|
||||
!IS_ROCKETLAKE(dev_priv));
|
||||
!IS_ROCKETLAKE(dev_priv) &&
|
||||
!IS_GEN9_BC(dev_priv));
|
||||
return PCH_TGP;
|
||||
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
|
||||
case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
|
||||
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
|
||||
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
|
||||
return PCH_JSP;
|
||||
case INTEL_PCH_ADP_DEVICE_ID_TYPE:
|
||||
drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n");
|
||||
drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv));
|
||||
return PCH_ADP;
|
||||
default:
|
||||
return PCH_NONE;
|
||||
}
|
||||
@ -156,7 +161,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
|
||||
* make an educated guess as to which PCH is really there.
|
||||
*/
|
||||
|
||||
if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
|
||||
if (IS_ALDERLAKE_S(dev_priv))
|
||||
id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
|
||||
else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
|
||||
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
|
||||
else if (IS_JSL_EHL(dev_priv))
|
||||
id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
|
||||
|
@@ -26,6 +26,7 @@ enum intel_pch {
PCH_JSP, /* Jasper Lake PCH */
PCH_MCC, /* Mule Creek Canyon PCH */
PCH_TGP, /* Tiger Lake PCH */
PCH_ADP, /* Alder Lake PCH */

/* Fake PCHs, functionality handled on the same PCI dev */
PCH_DG1 = 1024,
@@ -53,12 +54,14 @@ enum intel_pch {
#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */

#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
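Aside: because the PCH enum above is ordered by generation, appending PCH_ADP keeps ordered range checks elsewhere in this diff (for example the hotplug-pin selection that tests INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) valid without touching them. A minimal illustration with a made-up enum subset and placeholder table names:

#include <stdio.h>

/* Illustrative subset; what matters is that newer PCH generations get
 * larger enum values, so a single ordered comparison covers them all. */
enum pch_type { PCH_NONE, PCH_CNP, PCH_ICP, PCH_JSP, PCH_MCC, PCH_TGP, PCH_ADP };

static const char *hpd_table_for(enum pch_type pch)
{
    /* One ordered comparison replaces a list of per-PCH checks. */
    return pch >= PCH_ICP ? "hpd_icp" : "hpd_legacy";
}

int main(void)
{
    printf("ADP -> %s\n", hpd_table_for(PCH_ADP));
    printf("CNP -> %s\n", hpd_table_for(PCH_CNP));
    return 0;
}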