mirror of https://github.com/torvalds/linux.git
Merge tag 'drm-intel-next-2016-03-30' of git://anongit.freedesktop.org/drm-intel into drm-next
- VBT code refactor for a clean split between parsing & using of firmware information (Jani)
- untangle the pll computation code, and splitting up the monster i9xx_crtc_compute_clocks (Ander)
- dsi support for bxt (Jani, Shashank Sharma and others)
- color manager (i.e. de-gamma, color conversion matrix & gamma support) from Lionel Landwerlin
- Vulkan hsw support in the command parser (Jordan Justen)
- large-scale renaming of intel_engine_cs variables/parameters to avoid the epic ring vs. engine confusion introduced in gen8 (Tvrtko Ursulin)
- few atomic patches from Maarten & Matt, big one is two-stage wm programming on ilk-bdw
- refactor driver load and add infrastructure to inject load failures for testing, from Imre
- various small things all over

* tag 'drm-intel-next-2016-03-30' of git://anongit.freedesktop.org/drm-intel: (179 commits)
  drm/i915: Update DRIVER_DATE to 20160330
  drm/i915: Call intel_dp_mst_resume() before resuming displays
  drm/i915: Fix races on fbdev
  drm/i915: remove unused dev_priv->render_reclock_avail
  drm/i915: move sdvo mappings to vbt data
  drm/i915: move edp low vswing config to vbt data
  drm/i915: use a substruct in vbt data for edp
  drm/i915: replace for_each_engine()
  drm/i915: introduce for_each_engine_id()
  drm/i915/bxt: Fix DSI HW state readout
  drm/i915: Remove vblank wait from hsw_enable_ips, v2.
  drm/i915: Tidy aliasing_gtt_bind_vma()
  drm/i915: Split PNV version of crtc_compute_clock()
  drm/i915: Split g4x_crtc_compute_clock()
  drm/i915: Split i8xx_crtc_compute_clock()
  drm/i915: Split CHV and VLV specific crtc_compute_clock() hooks
  drm/i915: Merge ironlake_compute_clocks() and ironlake_crtc_compute_clock()
  drm/i915: Move fp divisor calculation into ironlake_compute_dpll()
  drm/i915: Pass crtc_state->dpll directly to ->find_dpll()
  drm/i915: Simplify ironlake_crtc_compute_clock() CPU eDP case
  ...
commit 85bd5ac371
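Most of the churn in the i915 diffs below is the mechanical ring -> engine rename plus the switch from the index-based for_each_ring() iterator to for_each_engine()/for_each_engine_id(). As a minimal sketch (lifted from the describe_obj() hunk further down, not new driver code), the before/after shape of a caller is:

    /* before: engines were "rings", iterated with a plain int index */
    struct intel_engine_cs *ring;
    int i;

    for_each_ring(ring, dev_priv, i)
            seq_printf(m, "%x ",
                       i915_gem_request_get_seqno(obj->last_read_req[i]));

    /* after: no index, or a typed enum intel_engine_id where one is needed */
    struct intel_engine_cs *engine;
    enum intel_engine_id id;

    for_each_engine_id(engine, dev_priv, id)
            seq_printf(m, "%x ",
                       i915_gem_request_get_seqno(obj->last_read_req[id]));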
Documentation/DocBook/gpu.tmpl
@@ -2153,7 +2153,11 @@ void intel_crt_init(struct drm_device *dev)
 		<td valign="top" >ENUM</td>
 		<td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
 		<td valign="top" >Connector</td>
-		<td valign="top" >TBD</td>
+		<td valign="top" >When this property is set to Limited 16:235
+		and CTM is set, the hardware will be programmed with the
+		result of the multiplication of CTM by the limited range
+		matrix to ensure the pixels normally in the range 0..1.0 are
+		remapped to the range 16/255..235/255.</td>
 		</tr>
 		<tr>
 		<td valign="top" >"audio"</td>
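For reference, the "limited range matrix" the new description refers to is the usual per-channel scale and offset for 16:235 video range: out = in * (235 - 16)/255 + 16/255, roughly 0.859 * in + 0.063. Pre-multiplying it into the user-supplied CTM lets the hardware apply both transforms in a single matrix pass.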
@@ -3334,7 +3338,7 @@ int num_ioctls;</synopsis>
         <title>Video BIOS Table (VBT)</title>
 !Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
 !Idrivers/gpu/drm/i915/intel_bios.c
-!Idrivers/gpu/drm/i915/intel_bios.h
+!Idrivers/gpu/drm/i915/intel_vbt_defs.h
       </sect2>
     </sect1>
drivers/gpu/drm/i915/Kconfig
@@ -56,3 +56,9 @@ config DRM_I915_USERPTR
 	  selected to enable full userptr support.
 
 	  If in doubt, say "Y".
+
+menu "drm/i915 Debugging"
+depends on DRM_I915
+depends on EXPERT
+source drivers/gpu/drm/i915/Kconfig.debug
+endmenu
12  drivers/gpu/drm/i915/Kconfig.debug  Normal file
@@ -0,0 +1,12 @@
+config DRM_I915_DEBUG
+	bool "Enable additional driver debugging"
+	depends on DRM_I915
+	default n
+	help
+	  Choose this option to turn on extra driver debugging that may affect
+	  performance but will catch some internal issues.
+
+	  Recommended for driver developers only.
+
+	  If in doubt, say "N".
+
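With this file in place, the option shows up under the "drm/i915 Debugging" menu that the Kconfig hunk above sources; a developer build opts in with CONFIG_DRM_I915_DEBUG=y, while the default stays n as the help text recommends.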
drivers/gpu/drm/i915/Makefile
@@ -55,7 +55,9 @@ i915-y += intel_audio.o \
 	intel_atomic.o \
 	intel_atomic_plane.o \
 	intel_bios.o \
+	intel_color.o \
 	intel_display.o \
+	intel_dpll_mgr.o \
 	intel_fbc.o \
 	intel_fifo_underrun.o \
 	intel_frontbuffer.o \
drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -444,6 +444,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
 	REG64(CL_PRIMITIVES_COUNT),
 	REG64(PS_INVOCATION_COUNT),
 	REG64(PS_DEPTH_COUNT),
+	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
 	REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
 	REG64(MI_PREDICATE_SRC0),
 	REG64(MI_PREDICATE_SRC1),
@@ -471,6 +472,25 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
 	REG32(GEN7_L3SQCREG1),
 	REG32(GEN7_L3CNTLREG2),
 	REG32(GEN7_L3CNTLREG3),
 };
 
+static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
+	REG64_IDX(HSW_CS_GPR, 0),
+	REG64_IDX(HSW_CS_GPR, 1),
+	REG64_IDX(HSW_CS_GPR, 2),
+	REG64_IDX(HSW_CS_GPR, 3),
+	REG64_IDX(HSW_CS_GPR, 4),
+	REG64_IDX(HSW_CS_GPR, 5),
+	REG64_IDX(HSW_CS_GPR, 6),
+	REG64_IDX(HSW_CS_GPR, 7),
+	REG64_IDX(HSW_CS_GPR, 8),
+	REG64_IDX(HSW_CS_GPR, 9),
+	REG64_IDX(HSW_CS_GPR, 10),
+	REG64_IDX(HSW_CS_GPR, 11),
+	REG64_IDX(HSW_CS_GPR, 12),
+	REG64_IDX(HSW_CS_GPR, 13),
+	REG64_IDX(HSW_CS_GPR, 14),
+	REG64_IDX(HSW_CS_GPR, 15),
+	REG32(HSW_SCRATCH1,
+	      .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
+	      .value = 0),
@@ -500,6 +520,33 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
 #undef REG64
 #undef REG32
 
+struct drm_i915_reg_table {
+	const struct drm_i915_reg_descriptor *regs;
+	int num_regs;
+	bool master;
+};
+
+static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
+	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
+	{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+};
+
+static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
+	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
+	{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+};
+
+static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
+	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
+	{ hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
+	{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+};
+
+static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
+	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
+	{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+};
+
 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
 {
 	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
@@ -555,7 +602,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
 	return 0;
 }
 
-static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+static bool validate_cmds_sorted(struct intel_engine_cs *engine,
 				 const struct drm_i915_cmd_table *cmd_tables,
 				 int cmd_table_count)
 {
@@ -577,7 +624,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
 
 			if (curr < previous) {
 				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
-					  ring->id, i, j, curr, previous);
+					  engine->id, i, j, curr, previous);
 				ret = false;
 			}
 
@@ -611,11 +658,18 @@ static bool check_sorted(int ring_id,
 	return ret;
 }
 
-static bool validate_regs_sorted(struct intel_engine_cs *ring)
+static bool validate_regs_sorted(struct intel_engine_cs *engine)
 {
-	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
-		check_sorted(ring->id, ring->master_reg_table,
-			     ring->master_reg_count);
+	int i;
+	const struct drm_i915_reg_table *table;
+
+	for (i = 0; i < engine->reg_table_count; i++) {
+		table = &engine->reg_tables[i];
+		if (!check_sorted(engine->id, table->regs, table->num_regs))
+			return false;
+	}
+
+	return true;
 }
 
 struct cmd_node {
@@ -639,13 +693,13 @@ struct cmd_node {
  */
 #define CMD_HASH_MASK STD_MI_OPCODE_MASK
 
-static int init_hash_table(struct intel_engine_cs *ring,
+static int init_hash_table(struct intel_engine_cs *engine,
 			   const struct drm_i915_cmd_table *cmd_tables,
 			   int cmd_table_count)
 {
 	int i, j;
 
-	hash_init(ring->cmd_hash);
+	hash_init(engine->cmd_hash);
 
 	for (i = 0; i < cmd_table_count; i++) {
 		const struct drm_i915_cmd_table *table = &cmd_tables[i];
@@ -660,7 +714,7 @@ static int init_hash_table(struct intel_engine_cs *ring,
 				return -ENOMEM;
 
 			desc_node->desc = desc;
-			hash_add(ring->cmd_hash, &desc_node->node,
+			hash_add(engine->cmd_hash, &desc_node->node,
 				 desc->cmd.value & CMD_HASH_MASK);
 		}
 	}
@@ -668,13 +722,13 @@ static int init_hash_table(struct intel_engine_cs *ring,
 	return 0;
 }
 
-static void fini_hash_table(struct intel_engine_cs *ring)
+static void fini_hash_table(struct intel_engine_cs *engine)
 {
 	struct hlist_node *tmp;
 	struct cmd_node *desc_node;
 	int i;
 
-	hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
+	hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
 		hash_del(&desc_node->node);
 		kfree(desc_node);
 	}
@@ -690,18 +744,18 @@ static void fini_hash_table(struct intel_engine_cs *ring)
  *
 * Return: non-zero if initialization fails
 */
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 {
 	const struct drm_i915_cmd_table *cmd_tables;
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN7(ring->dev))
+	if (!IS_GEN7(engine->dev))
 		return 0;
 
-	switch (ring->id) {
+	switch (engine->id) {
 	case RCS:
-		if (IS_HASWELL(ring->dev)) {
+		if (IS_HASWELL(engine->dev)) {
 			cmd_tables = hsw_render_ring_cmds;
 			cmd_table_count =
 				ARRAY_SIZE(hsw_render_ring_cmds);
@@ -710,26 +764,23 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
 		}
 
-		ring->reg_table = gen7_render_regs;
-		ring->reg_count = ARRAY_SIZE(gen7_render_regs);
-
-		if (IS_HASWELL(ring->dev)) {
-			ring->master_reg_table = hsw_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		if (IS_HASWELL(engine->dev)) {
+			engine->reg_tables = hsw_render_reg_tables;
+			engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
 		} else {
-			ring->master_reg_table = ivb_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+			engine->reg_tables = ivb_render_reg_tables;
+			engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
 		}
 
-		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
 		break;
 	case VCS:
 		cmd_tables = gen7_video_cmds;
 		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
-		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case BCS:
-		if (IS_HASWELL(ring->dev)) {
+		if (IS_HASWELL(engine->dev)) {
 			cmd_tables = hsw_blt_ring_cmds;
 			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
 		} else {
@@ -737,44 +788,41 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
 		}
 
-		ring->reg_table = gen7_blt_regs;
-		ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
-
-		if (IS_HASWELL(ring->dev)) {
-			ring->master_reg_table = hsw_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+		if (IS_HASWELL(engine->dev)) {
+			engine->reg_tables = hsw_blt_reg_tables;
+			engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
 		} else {
-			ring->master_reg_table = ivb_master_regs;
-			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+			engine->reg_tables = ivb_blt_reg_tables;
+			engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
 		}
 
-		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
 		break;
 	case VECS:
 		cmd_tables = hsw_vebox_cmds;
 		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
 		/* VECS can use the same length_mask function as VCS */
-		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	default:
 		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
-			  ring->id);
+			  engine->id);
 		BUG();
 	}
 
-	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
-	BUG_ON(!validate_regs_sorted(ring));
+	BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
+	BUG_ON(!validate_regs_sorted(engine));
 
-	WARN_ON(!hash_empty(ring->cmd_hash));
+	WARN_ON(!hash_empty(engine->cmd_hash));
 
-	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+	ret = init_hash_table(engine, cmd_tables, cmd_table_count);
 	if (ret) {
 		DRM_ERROR("CMD: cmd_parser_init failed!\n");
-		fini_hash_table(ring);
+		fini_hash_table(engine);
 		return ret;
 	}
 
-	ring->needs_cmd_parser = true;
+	engine->needs_cmd_parser = true;
 
 	return 0;
 }
@@ -786,21 +834,21 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 * Releases any resources related to command parsing that may have been
 * initialized for the specified ring.
 */
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
 {
-	if (!ring->needs_cmd_parser)
+	if (!engine->needs_cmd_parser)
 		return;
 
-	fini_hash_table(ring);
+	fini_hash_table(engine);
 }
 
 static const struct drm_i915_cmd_descriptor*
-find_cmd_in_table(struct intel_engine_cs *ring,
+find_cmd_in_table(struct intel_engine_cs *engine,
 		  u32 cmd_header)
 {
 	struct cmd_node *desc_node;
 
-	hash_for_each_possible(ring->cmd_hash, desc_node, node,
+	hash_for_each_possible(engine->cmd_hash, desc_node, node,
 			       cmd_header & CMD_HASH_MASK) {
 		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
 		u32 masked_cmd = desc->cmd.mask & cmd_header;
@@ -822,18 +870,18 @@ find_cmd_in_table(struct intel_engine_cs *ring,
 * ring's default length encoding and returns default_desc.
 */
 static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_engine_cs *ring,
+find_cmd(struct intel_engine_cs *engine,
 	 u32 cmd_header,
 	 struct drm_i915_cmd_descriptor *default_desc)
 {
 	const struct drm_i915_cmd_descriptor *desc;
 	u32 mask;
 
-	desc = find_cmd_in_table(ring, cmd_header);
+	desc = find_cmd_in_table(engine, cmd_header);
 	if (desc)
 		return desc;
 
-	mask = ring->get_cmd_length_mask(cmd_header);
+	mask = engine->get_cmd_length_mask(cmd_header);
 	if (!mask)
 		return NULL;
 
@@ -848,12 +896,31 @@ static const struct drm_i915_reg_descriptor *
 find_reg(const struct drm_i915_reg_descriptor *table,
 	 int count, u32 addr)
 {
-	if (table) {
-		int i;
+	int i;
 
-		for (i = 0; i < count; i++) {
-			if (i915_mmio_reg_offset(table[i].addr) == addr)
-				return &table[i];
-		}
+	for (i = 0; i < count; i++) {
+		if (i915_mmio_reg_offset(table[i].addr) == addr)
+			return &table[i];
 	}
 
 	return NULL;
 }
 
+static const struct drm_i915_reg_descriptor *
+find_reg_in_tables(const struct drm_i915_reg_table *tables,
+		   int count, bool is_master, u32 addr)
+{
+	int i;
+	const struct drm_i915_reg_table *table;
+	const struct drm_i915_reg_descriptor *reg;
+
+	for (i = 0; i < count; i++) {
+		table = &tables[i];
+		if (!table->master || is_master) {
+			reg = find_reg(table->regs, table->num_regs,
+				       addr);
+			if (reg != NULL)
+				return reg;
+		}
+	}
+
+	return NULL;
+}
@@ -963,18 +1030,18 @@ unpin_src:
 *
 * Return: true if the ring requires software command parsing
 */
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
 {
-	if (!ring->needs_cmd_parser)
+	if (!engine->needs_cmd_parser)
 		return false;
 
-	if (!USES_PPGTT(ring->dev))
+	if (!USES_PPGTT(engine->dev))
 		return false;
 
 	return (i915.enable_cmd_parser == 1);
 }
 
-static bool check_cmd(const struct intel_engine_cs *ring,
+static bool check_cmd(const struct intel_engine_cs *engine,
 		      const struct drm_i915_cmd_descriptor *desc,
 		      const u32 *cmd, u32 length,
 		      const bool is_master,
@@ -1004,17 +1071,14 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 		     offset += step) {
 			const u32 reg_addr = cmd[offset] & desc->reg.mask;
 			const struct drm_i915_reg_descriptor *reg =
-				find_reg(ring->reg_table, ring->reg_count,
-					 reg_addr);
-
-			if (!reg && is_master)
-				reg = find_reg(ring->master_reg_table,
-					       ring->master_reg_count,
-					       reg_addr);
+				find_reg_in_tables(engine->reg_tables,
+						   engine->reg_table_count,
+						   is_master,
+						   reg_addr);
 
 			if (!reg) {
 				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
-						 reg_addr, *cmd, ring->id);
+						 reg_addr, *cmd, engine->id);
 				return false;
 			}
 
@@ -1087,7 +1151,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 					 *cmd,
 					 desc->bits[i].mask,
 					 desc->bits[i].expected,
-					 dword, ring->id);
+					 dword, engine->id);
 				return false;
 			}
 		}
@@ -1113,7 +1177,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
 * if the batch appears legal but should use hardware parsing
 */
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_parse_cmds(struct intel_engine_cs *engine,
 		    struct drm_i915_gem_object *batch_obj,
 		    struct drm_i915_gem_object *shadow_batch_obj,
 		    u32 batch_start_offset,
@@ -1147,7 +1211,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 		if (*cmd == MI_BATCH_BUFFER_END)
 			break;
 
-		desc = find_cmd(ring, *cmd, &default_desc);
+		desc = find_cmd(engine, *cmd, &default_desc);
 		if (!desc) {
 			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
 					 *cmd);
@@ -1179,7 +1243,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 			break;
 		}
 
-		if (!check_cmd(ring, desc, cmd, length, is_master,
+		if (!check_cmd(engine, desc, cmd, length, is_master,
 			       &oacontrol_set)) {
 			ret = -EINVAL;
 			break;
@@ -1223,6 +1287,7 @@ int i915_cmd_parser_get_version(void)
 	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
 	 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
 	 * 5. GPGPU dispatch compute indirect registers.
+	 * 6. TIMESTAMP register and Haswell CS GPR registers
 	 */
-	return 5;
+	return 6;
 }
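The version bump above is what userspace checks before relying on the newly whitelisted registers. As a hedged sketch (userspace side, using libdrm; an already-open i915 DRM fd and the helper name are assumptions, not code from this commit), querying the active parser version looks like:

    #include <xf86drm.h>
    #include <i915_drm.h>

    /* Ask the kernel which command parser version is active. A result
     * >= 6 implies the TIMESTAMP and Haswell CS GPR whitelist entries
     * added in this series are present. */
    static int cmd_parser_version(int fd)
    {
            int version = 0;
            struct drm_i915_getparam gp = {
                    .param = I915_PARAM_CMD_PARSER_VERSION,
                    .value = &version,
            };

            if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return -1; /* getparam failed or unsupported */

            return version;
    }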
drivers/gpu/drm/i915/i915_debugfs.c
@@ -129,10 +129,10 @@ static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
 	int pin_count = 0;
-	int i;
+	enum intel_engine_id id;
 
 	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
 		   &obj->base,
@@ -143,9 +143,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain);
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine_id(engine, dev_priv, id)
 		seq_printf(m, "%x ",
-			   i915_gem_request_get_seqno(obj->last_read_req[i]));
+			   i915_gem_request_get_seqno(obj->last_read_req[id]));
 	seq_printf(m, "] %x %x%s%s%s",
 		   i915_gem_request_get_seqno(obj->last_write_req),
 		   i915_gem_request_get_seqno(obj->last_fenced_req),
@@ -184,7 +184,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	}
 	if (obj->last_write_req != NULL)
 		seq_printf(m, " (%s)",
-			   i915_gem_request_get_ring(obj->last_write_req)->name);
+			   i915_gem_request_get_engine(obj->last_write_req)->name);
 	if (obj->frontbuffer_bits)
 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
@@ -203,7 +203,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	struct list_head *head;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *vm = &dev_priv->gtt.base;
+	struct i915_address_space *vm = &dev_priv->ggtt.base;
 	struct i915_vma *vma;
 	u64 total_obj_size, total_gtt_size;
 	int count, ret;
@@ -397,15 +397,15 @@ static void print_batch_pool_stats(struct seq_file *m,
 {
 	struct drm_i915_gem_object *obj;
 	struct file_stats stats;
-	struct intel_engine_cs *ring;
-	int i, j;
+	struct intel_engine_cs *engine;
+	int j;
 
 	memset(&stats, 0, sizeof(stats));
 
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+	for_each_engine(engine, dev_priv) {
+		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 			list_for_each_entry(obj,
-					    &ring->batch_pool.cache_list[j],
+					    &engine->batch_pool.cache_list[j],
 					    batch_pool_link)
 				per_file_stats(0, obj, &stats);
 		}
@@ -433,7 +433,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	u32 count, mappable_count, purgeable_count;
 	u64 size, mappable_size, purgeable_size;
 	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm = &dev_priv->gtt.base;
+	struct i915_address_space *vm = &dev_priv->ggtt.base;
 	struct drm_file *file;
 	struct i915_vma *vma;
 	int ret;
@@ -492,8 +492,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   count, size);
 
 	seq_printf(m, "%llu [%llu] gtt total\n",
-		   dev_priv->gtt.base.total,
-		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+		   dev_priv->ggtt.base.total,
+		   (u64)dev_priv->ggtt.mappable_end - dev_priv->ggtt.base.start);
 
 	seq_putc(m, '\n');
 	print_batch_pool_stats(m, dev_priv);
@@ -591,14 +591,13 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 				   pipe, plane);
 		}
 		if (work->flip_queued_req) {
-			struct intel_engine_cs *ring =
-				i915_gem_request_get_ring(work->flip_queued_req);
+			struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
 
 			seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
-				   ring->name,
+				   engine->name,
 				   i915_gem_request_get_seqno(work->flip_queued_req),
 				   dev_priv->next_seqno,
-				   ring->get_seqno(ring, true),
+				   engine->get_seqno(engine, true),
 				   i915_gem_request_completed(work->flip_queued_req, true));
 		} else
 			seq_printf(m, "Flip not associated with any ring\n");
@@ -637,28 +636,28 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int total = 0;
-	int ret, i, j;
+	int ret, j;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+	for_each_engine(engine, dev_priv) {
+		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 			int count;
 
 			count = 0;
 			list_for_each_entry(obj,
-					    &ring->batch_pool.cache_list[j],
+					    &engine->batch_pool.cache_list[j],
 					    batch_pool_link)
 				count++;
 			seq_printf(m, "%s cache[%d]: %d objects\n",
-				   ring->name, j, count);
+				   engine->name, j, count);
 
 			list_for_each_entry(obj,
-					    &ring->batch_pool.cache_list[j],
+					    &engine->batch_pool.cache_list[j],
 					    batch_pool_link) {
 				seq_puts(m, "   ");
 				describe_obj(m, obj);
@@ -681,26 +680,26 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_gem_request *req;
-	int ret, any, i;
+	int ret, any;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
 	any = 0;
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		int count;
 
 		count = 0;
-		list_for_each_entry(req, &ring->request_list, list)
+		list_for_each_entry(req, &engine->request_list, list)
 			count++;
 		if (count == 0)
 			continue;
 
-		seq_printf(m, "%s requests: %d\n", ring->name, count);
-		list_for_each_entry(req, &ring->request_list, list) {
+		seq_printf(m, "%s requests: %d\n", engine->name, count);
+		list_for_each_entry(req, &engine->request_list, list) {
 			struct task_struct *task;
 
 			rcu_read_lock();
@@ -726,11 +725,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 }
 
 static void i915_ring_seqno_info(struct seq_file *m,
-				 struct intel_engine_cs *ring)
+				 struct intel_engine_cs *engine)
 {
-	if (ring->get_seqno) {
+	if (engine->get_seqno) {
 		seq_printf(m, "Current sequence (%s): %x\n",
-			   ring->name, ring->get_seqno(ring, false));
+			   engine->name, engine->get_seqno(engine, false));
 	}
 }
 
@@ -739,16 +738,16 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int ret, i;
+	struct intel_engine_cs *engine;
+	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, i)
-		i915_ring_seqno_info(m, ring);
+	for_each_engine(engine, dev_priv)
+		i915_ring_seqno_info(m, engine);
 
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -762,7 +761,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int ret, i, pipe;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -934,13 +933,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		seq_printf(m, "Graphics Interrupt mask: %08x\n",
 			   I915_READ(GTIMR));
 	}
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s): %08x\n",
-				   ring->name, I915_READ_IMR(ring));
+				   engine->name, I915_READ_IMR(engine));
 		}
-		i915_ring_seqno_info(m, ring);
+		i915_ring_seqno_info(m, engine);
 	}
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -981,12 +980,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	const u32 *hws;
 	int i;
 
-	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	hws = ring->status_page.page_addr;
+	engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
+	hws = engine->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -1331,11 +1330,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	u64 acthd[I915_NUM_RINGS];
-	u32 seqno[I915_NUM_RINGS];
+	struct intel_engine_cs *engine;
+	u64 acthd[I915_NUM_ENGINES];
+	u32 seqno[I915_NUM_ENGINES];
 	u32 instdone[I915_NUM_INSTDONE_REG];
-	int i, j;
+	enum intel_engine_id id;
+	int j;
 
 	if (!i915.enable_hangcheck) {
 		seq_printf(m, "Hangcheck disabled\n");
@@ -1344,9 +1344,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, i) {
-		seqno[i] = ring->get_seqno(ring, false);
-		acthd[i] = intel_ring_get_active_head(ring);
+	for_each_engine_id(engine, dev_priv, id) {
+		seqno[id] = engine->get_seqno(engine, false);
+		acthd[id] = intel_ring_get_active_head(engine);
 	}
 
 	i915_get_extra_instdone(dev, instdone);
@@ -1360,19 +1360,17 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	} else
 		seq_printf(m, "Hangcheck inactive\n");
 
-	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "%s:\n", ring->name);
+	for_each_engine_id(engine, dev_priv, id) {
+		seq_printf(m, "%s:\n", engine->name);
 		seq_printf(m, "\tseqno = %x [current %x]\n",
-			   ring->hangcheck.seqno, seqno[i]);
+			   engine->hangcheck.seqno, seqno[id]);
 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
-			   (long long)ring->hangcheck.acthd,
-			   (long long)acthd[i]);
-		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
-			   (long long)ring->hangcheck.max_acthd);
-		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
-		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+			   (long long)engine->hangcheck.acthd,
+			   (long long)acthd[id]);
+		seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
+		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
 
-		if (ring->id == RCS) {
+		if (engine->id == RCS) {
 			seq_puts(m, "\tinstdone read =");
 
 			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
@@ -1382,7 +1380,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
 				seq_printf(m, " 0x%08x",
-					   ring->hangcheck.instdone[j]);
+					   engine->hangcheck.instdone[j]);
 
 			seq_puts(m, "\n");
 		}
@@ -1948,9 +1946,10 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
-	int ret, i;
+	enum intel_engine_id id;
+	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -1968,13 +1967,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 		if (i915.enable_execlists) {
 			seq_putc(m, '\n');
-			for_each_ring(ring, dev_priv, i) {
+			for_each_engine_id(engine, dev_priv, id) {
 				struct drm_i915_gem_object *ctx_obj =
-					ctx->engine[i].state;
+					ctx->engine[id].state;
 				struct intel_ringbuffer *ringbuf =
-					ctx->engine[i].ringbuf;
+					ctx->engine[id].ringbuf;
 
-				seq_printf(m, "%s: ", ring->name);
+				seq_printf(m, "%s: ", engine->name);
 				if (ctx_obj)
 					describe_obj(m, ctx_obj);
 				if (ringbuf)
@@ -1995,22 +1994,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 static void i915_dump_lrc_obj(struct seq_file *m,
 			      struct intel_context *ctx,
-			      struct intel_engine_cs *ring)
+			      struct intel_engine_cs *engine)
 {
 	struct page *page;
 	uint32_t *reg_state;
 	int j;
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	unsigned long ggtt_offset = 0;
 
 	if (ctx_obj == NULL) {
 		seq_printf(m, "Context on %s with no gem object\n",
-			   ring->name);
+			   engine->name);
 		return;
 	}
 
-	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-		   intel_execlists_ctx_id(ctx, ring));
+	seq_printf(m, "CONTEXT: %s %u\n", engine->name,
+		   intel_execlists_ctx_id(ctx, engine));
 
 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
 		seq_puts(m, "\tNot bound in GGTT\n");
@@ -2043,9 +2042,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
-	int ret, i;
+	int ret;
 
 	if (!i915.enable_execlists) {
 		seq_printf(m, "Logical Ring Contexts are disabled\n");
@@ -2058,8 +2057,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link)
 		if (ctx != dev_priv->kernel_context)
-			for_each_ring(ring, dev_priv, i)
-				i915_dump_lrc_obj(m, ctx, ring);
+			for_each_engine(engine, dev_priv)
+				i915_dump_lrc_obj(m, ctx, engine);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -2071,15 +2070,14 @@ static int i915_execlists(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	u32 status_pointer;
 	u8 read_pointer;
 	u8 write_pointer;
 	u32 status;
 	u32 ctx_id;
 	struct list_head *cursor;
-	int ring_id, i;
-	int ret;
+	int i, ret;
 
 	if (!i915.enable_execlists) {
 		seq_puts(m, "Logical Ring Contexts are disabled\n");
@@ -2092,22 +2090,22 @@ static int i915_execlists(struct seq_file *m, void *data)
 
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, ring_id) {
+	for_each_engine(engine, dev_priv) {
 		struct drm_i915_gem_request *head_req = NULL;
 		int count = 0;
 		unsigned long flags;
 
-		seq_printf(m, "%s\n", ring->name);
+		seq_printf(m, "%s\n", engine->name);
 
-		status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
-		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
+		status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
+		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
 		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
 			   status, ctx_id);
 
-		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
-		read_pointer = ring->next_context_status_buffer;
+		read_pointer = engine->next_context_status_buffer;
 		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
 		if (read_pointer > write_pointer)
 			write_pointer += GEN8_CSB_ENTRIES;
@@ -2115,24 +2113,25 @@ static int i915_execlists(struct seq_file *m, void *data)
 			   read_pointer, write_pointer);
 
 		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
-			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
-			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
+			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
+			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
 
 			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
 				   i, status, ctx_id);
 		}
 
-		spin_lock_irqsave(&ring->execlist_lock, flags);
-		list_for_each(cursor, &ring->execlist_queue)
+		spin_lock_irqsave(&engine->execlist_lock, flags);
+		list_for_each(cursor, &engine->execlist_queue)
 			count++;
-		head_req = list_first_entry_or_null(&ring->execlist_queue,
-				struct drm_i915_gem_request, execlist_link);
-		spin_unlock_irqrestore(&ring->execlist_lock, flags);
+		head_req = list_first_entry_or_null(&engine->execlist_queue,
+						    struct drm_i915_gem_request,
+						    execlist_link);
+		spin_unlock_irqrestore(&engine->execlist_lock, flags);
 
 		seq_printf(m, "\t%d requests in queue\n", count);
 		if (head_req) {
 			seq_printf(m, "\tHead request id: %u\n",
-				   intel_execlists_ctx_id(head_req->ctx, ring));
+				   intel_execlists_ctx_id(head_req->ctx, engine));
 			seq_printf(m, "\tHead request tail: %u\n",
 				   head_req->tail);
 		}
@@ -2248,19 +2247,19 @@ static int per_file_ctx(int id, void *ptr, void *data)
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	int unused, i;
+	int i;
 
 	if (!ppgtt)
 		return;
 
-	for_each_ring(ring, dev_priv, unused) {
-		seq_printf(m, "%s\n", ring->name);
+	for_each_engine(engine, dev_priv) {
+		seq_printf(m, "%s\n", engine->name);
 		for (i = 0; i < 4; i++) {
-			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
+			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
 			pdp <<= 32;
-			pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
+			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
 		}
 	}
@@ -2269,19 +2268,22 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;
 
 	if (INTEL_INFO(dev)->gen == 6)
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
-	for_each_ring(ring, dev_priv, i) {
-		seq_printf(m, "%s\n", ring->name);
+	for_each_engine(engine, dev_priv) {
+		seq_printf(m, "%s\n", engine->name);
 		if (INTEL_INFO(dev)->gen == 7)
-			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
-		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
-		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
-		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+			seq_printf(m, "GFX_MODE: 0x%08x\n",
+				   I915_READ(RING_MODE_GEN7(engine)));
+		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_BASE(engine)));
+		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
+		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
+			   I915_READ(RING_PP_DIR_DCLV(engine)));
 	}
 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2336,12 +2338,11 @@ out_put:
 
 static int count_irq_waiters(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int count = 0;
-	int i;
 
-	for_each_ring(ring, i915, i)
-		count += ring->irq_refcount;
+	for_each_engine(engine, i915)
+		count += engine->irq_refcount;
 
 	return count;
 }
@@ -2449,9 +2450,8 @@ static void i915_guc_client_info(struct seq_file *m,
 				 struct drm_i915_private *dev_priv,
 				 struct i915_guc_client *client)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	uint64_t tot = 0;
-	uint32_t i;
 
 	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
 		client->priority, client->ctx_index, client->proc_desc_offset);
@@ -2464,11 +2464,11 @@ static void i915_guc_client_info(struct seq_file *m,
 	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
 	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		seq_printf(m, "\tSubmissions: %llu %s\n",
-				client->submissions[ring->guc_id],
-				ring->name);
-		tot += client->submissions[ring->guc_id];
+				client->submissions[engine->guc_id],
+				engine->name);
+		tot += client->submissions[engine->guc_id];
 	}
 	seq_printf(m, "\tTotal: %llu\n", tot);
 }
@@ -2480,8 +2480,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc guc;
 	struct i915_guc_client client = {};
-	struct intel_engine_cs *ring;
-	enum intel_ring_id i;
+	struct intel_engine_cs *engine;
 	u64 total = 0;
 
 	if (!HAS_GUC_SCHED(dev_priv->dev))
@@ -2504,11 +2503,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
 	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
 
 	seq_printf(m, "\nGuC submissions:\n");
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
-			ring->name, guc.submissions[ring->guc_id],
-			guc.last_seqno[ring->guc_id]);
-		total += guc.submissions[ring->guc_id];
+			engine->name, guc.submissions[engine->guc_id],
+			guc.last_seqno[engine->guc_id]);
+		total += guc.submissions[engine->guc_id];
 	}
 	seq_printf(m, "\t%s: %llu\n", "Total", total);
 
@@ -3130,9 +3129,10 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
-	int i, j, ret;
+	enum intel_engine_id id;
+	int j, ret;
 
 	if (!i915_semaphore_is_enabled(dev)) {
 		seq_puts(m, "Semaphores are disabled\n");
@@ -3151,14 +3151,14 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
 
 		seqno = (uint64_t *)kmap_atomic(page);
-		for_each_ring(ring, dev_priv, i) {
+		for_each_engine_id(engine, dev_priv, id) {
 			uint64_t offset;
 
-			seq_printf(m, "%s\n", ring->name);
+			seq_printf(m, "%s\n", engine->name);
 
 			seq_puts(m, "  Last signal:");
 			for (j = 0; j < num_rings; j++) {
-				offset = i * I915_NUM_RINGS + j;
+				offset = id * I915_NUM_ENGINES + j;
 				seq_printf(m, "0x%08llx (0x%02llx) ",
 					   seqno[offset], offset * 8);
 			}
@@ -3166,7 +3166,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 
 			seq_puts(m, "  Last wait: ");
 			for (j = 0; j < num_rings; j++) {
-				offset = i + (j * I915_NUM_RINGS);
+				offset = id + (j * I915_NUM_ENGINES);
 				seq_printf(m, "0x%08llx (0x%02llx) ",
 					   seqno[offset], offset * 8);
 			}
@@ -3176,18 +3176,18 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		kunmap_atomic(seqno);
 	} else {
 		seq_puts(m, "  Last signal:");
-		for_each_ring(ring, dev_priv, i)
+		for_each_engine(engine, dev_priv)
 			for (j = 0; j < num_rings; j++)
 				seq_printf(m, "0x%08x\n",
-					   I915_READ(ring->semaphore.mbox.signal[j]));
+					   I915_READ(engine->semaphore.mbox.signal[j]));
 		seq_putc(m, '\n');
 	}
 
 	seq_puts(m, "\nSync seqno:\n");
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < num_rings; j++) {
-			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
-		}
+	for_each_engine(engine, dev_priv) {
+		for (j = 0; j < num_rings; j++)
+			seq_printf(m, "  0x%08x ",
+				   engine->semaphore.sync_seqno[j]);
 		seq_putc(m, '\n');
 	}
 	seq_putc(m, '\n');
@@ -3209,8 +3209,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
-		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
-			   pll->config.crtc_mask, pll->active, yesno(pll->on));
+		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
+			   pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
 		seq_printf(m, " tracked hardware state:\n");
 		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
 		seq_printf(m, " dpll_md: 0x%08x\n",
@@ -3228,11 +3228,12 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 {
 	int i;
 	int ret;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *workarounds = &dev_priv->workarounds;
+	enum intel_engine_id id;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -3241,9 +3242,9 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine_id(engine, dev_priv, id)
 		seq_printf(m, "HW whitelist count for %s: %d\n",
-			   ring->name, workarounds->hw_whitelist_count[i]);
+			   engine->name, workarounds->hw_whitelist_count[id]);
 	for (i = 0; i < workarounds->count; ++i) {
 		i915_reg_t addr;
 		u32 mask, value, read;
drivers/gpu/drm/i915/i915_dma.c
@@ -50,6 +50,66 @@
 #include <linux/pm_runtime.h>
 #include <linux/oom.h>
 
+static unsigned int i915_load_fail_count;
+
+bool __i915_inject_load_failure(const char *func, int line)
+{
+	if (i915_load_fail_count >= i915.inject_load_failure)
+		return false;
+
+	if (++i915_load_fail_count == i915.inject_load_failure) {
+		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
+			 i915.inject_load_failure, func, line);
+		return true;
+	}
+
+	return false;
+}
+
+#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+		    "providing the dmesg log by booting with drm.debug=0xf"
+
+void
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+	      const char *fmt, ...)
+{
+	static bool shown_bug_once;
+	struct device *dev = dev_priv->dev->dev;
+	bool is_error = level[1] <= KERN_ERR[1];
+	bool is_debug = level[1] == KERN_DEBUG[1];
+	struct va_format vaf;
+	va_list args;
+
+	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+		return;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+		   __builtin_return_address(0), &vaf);
+
+	if (is_error && !shown_bug_once) {
+		dev_notice(dev, "%s", FDO_BUG_MSG);
+		shown_bug_once = true;
+	}
+
+	va_end(args);
+}
+
+static bool i915_error_injected(struct drm_i915_private *dev_priv)
+{
+	return i915.inject_load_failure &&
+	       i915_load_fail_count == i915.inject_load_failure;
+}
+
+#define i915_load_error(dev_priv, fmt, ...)				     \
+	__i915_printk(dev_priv,						     \
+		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
+		      fmt, ##__VA_ARGS__)
+
 static int i915_getparam(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
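The helper above is driven by the new i915.inject_load_failure=N module parameter and, judging by the callsites in the hunks below, is consumed through an i915_inject_load_failure() wrapper macro that records the callsite; the Nth checkpoint reached then reports a simulated failure, so each error path can be exercised in turn. A sketch of the intended pattern (example_load_step is a made-up name; the real checkpoints are i915_load_modeset_init(), i915_driver_init_early(), and friends below):

    static int example_load_step(struct drm_i915_private *dev_priv)
    {
            /* fails exactly once, when checkpoint N is reached */
            if (i915_inject_load_failure())
                    return -ENODEV;

            /* ... real initialization work ... */
            return 0;
    }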
@@ -87,16 +147,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = 1;
 		break;
 	case I915_PARAM_HAS_BSD:
-		value = intel_ring_initialized(&dev_priv->ring[VCS]);
+		value = intel_engine_initialized(&dev_priv->engine[VCS]);
 		break;
 	case I915_PARAM_HAS_BLT:
-		value = intel_ring_initialized(&dev_priv->ring[BCS]);
+		value = intel_engine_initialized(&dev_priv->engine[BCS]);
 		break;
 	case I915_PARAM_HAS_VEBOX:
-		value = intel_ring_initialized(&dev_priv->ring[VECS]);
+		value = intel_engine_initialized(&dev_priv->engine[VECS]);
 		break;
 	case I915_PARAM_HAS_BSD2:
-		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
+		value = intel_engine_initialized(&dev_priv->engine[VCS2]);
 		break;
 	case I915_PARAM_HAS_RELAXED_FENCING:
 		value = 1;
@@ -370,6 +430,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	if (i915_inject_load_failure())
+		return -ENODEV;
+
 	ret = intel_bios_init(dev_priv);
 	if (ret)
 		DRM_INFO("failed to find VBIOS tables\n");
@@ -430,11 +493,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 * Some ports require correctly set-up hpd registers for detection to
 	 * work properly (leading to ghost connected connector status), e.g. VGA
 	 * on gm45. Hence we can only set up the initial fbdev config after hpd
-	 * irqs are fully enabled. Now we should scan for the initial config
-	 * only once hotplug handling is enabled, but due to screwed-up locking
-	 * around kms/fbdev init we can't protect the fdbev initial config
-	 * scanning against hotplug events. Hence do this first and ignore the
-	 * tiny window where we will loose hotplug notifactions.
+	 * irqs are fully enabled. We protect the fbdev initial config scanning
+	 * against hotplug events by waiting in intel_fbdev_output_poll_changed
+	 * until the asynchronous thread has finished.
 	 */
 	intel_fbdev_initial_config_async(dev);
 
@@ -444,7 +505,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 cleanup_gem:
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_cleanup_engines(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
@@ -453,6 +514,7 @@ cleanup_irq:
 	intel_teardown_gmbus(dev);
 cleanup_csr:
 	intel_csr_ucode_fini(dev_priv);
+	intel_power_domains_fini(dev_priv);
 	vga_switcheroo_unregister_client(dev->pdev);
 cleanup_vga_client:
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -472,8 +534,8 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 	if (!ap)
 		return -ENOMEM;
 
-	ap->ranges[0].base = dev_priv->gtt.mappable_base;
-	ap->ranges[0].size = dev_priv->gtt.mappable_end;
+	ap->ranges[0].base = dev_priv->ggtt.mappable_base;
+	ap->ranges[0].size = dev_priv->ggtt.mappable_end;
 
 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -853,6 +915,10 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 	else if (INTEL_INFO(dev)->gen >= 9)
 		gen9_sseu_info_init(dev);
 
+	/* Snooping is broken on BXT A stepping. */
+	info->has_snoop = !info->has_llc;
+	info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
+
 	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
 	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
 	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
@@ -929,6 +995,84 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
 	destroy_workqueue(dev_priv->wq);
 }
 
+/**
+ * i915_driver_init_early - setup state not requiring device access
+ * @dev_priv: device private
+ *
+ * Initialize everything that is a "SW-only" state, that is state not
+ * requiring accessing the device or exposing the driver via kernel internal
+ * or userspace interfaces. Example steps belonging here: lock initialization,
+ * system memory allocation, setting up device specific attributes and
+ * function hooks not requiring accessing the device.
+ */
+static int i915_driver_init_early(struct drm_i915_private *dev_priv,
+				  struct drm_device *dev,
+				  struct intel_device_info *info)
+{
+	struct intel_device_info *device_info;
+	int ret = 0;
+
+	if (i915_inject_load_failure())
+		return -ENODEV;
+
+	/* Setup the write-once "constant" device info */
+	device_info = (struct intel_device_info *)&dev_priv->info;
+	memcpy(device_info, info, sizeof(dev_priv->info));
+	device_info->device_id = dev->pdev->device;
+
+	spin_lock_init(&dev_priv->irq_lock);
+	spin_lock_init(&dev_priv->gpu_error.lock);
+	mutex_init(&dev_priv->backlight_lock);
+	spin_lock_init(&dev_priv->uncore.lock);
+	spin_lock_init(&dev_priv->mm.object_stat_lock);
+	spin_lock_init(&dev_priv->mmio_flip_lock);
+	mutex_init(&dev_priv->sb_lock);
+	mutex_init(&dev_priv->modeset_restore_lock);
+	mutex_init(&dev_priv->av_mutex);
+	mutex_init(&dev_priv->wm.wm_mutex);
+	mutex_init(&dev_priv->pps_mutex);
+
+	ret = i915_workqueues_init(dev_priv);
+	if (ret < 0)
+		return ret;
+
+	/* This must be called before any calls to HAS_PCH_* */
+	intel_detect_pch(dev);
+
+	intel_pm_setup(dev);
+	intel_init_dpio(dev_priv);
+	intel_power_domains_init(dev_priv);
+	intel_irq_init(dev_priv);
+	intel_init_display_hooks(dev_priv);
+	intel_init_clock_gating_hooks(dev_priv);
+	intel_init_audio_hooks(dev_priv);
+	i915_gem_load_init(dev);
+
+	intel_display_crc_init(dev);
+
+	i915_dump_device_info(dev_priv);
+
+	/* Not all pre-production machines fall into this category, only the
+	 * very first ones. Almost everything should work, except for maybe
+	 * suspend/resume. And we don't implement workarounds that affect only
+	 * pre-production machines. */
+	if (IS_HSW_EARLY_SDV(dev))
+		DRM_INFO("This is an early pre-production Haswell machine. "
+			 "It may not be fully functional.\n");
+
+	return 0;
+}
+
+/**
+ * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+{
+	i915_gem_load_cleanup(dev_priv->dev);
+	i915_workqueues_cleanup(dev_priv);
+}
+
 static int i915_mmio_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@ -970,84 +1114,73 @@ static void i915_mmio_cleanup(struct drm_device *dev)
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_driver_load - setup chip and create an initial config
|
||||
* @dev: DRM device
|
||||
* @flags: startup flags
|
||||
* i915_driver_init_mmio - setup device MMIO
|
||||
* @dev_priv: device private
|
||||
*
|
||||
* The driver load routine has to do several things:
|
||||
* - drive output discovery via intel_modeset_init()
|
||||
* - initialize the memory manager
|
||||
* - allocate initial config memory
|
||||
* - setup the DRM framebuffer with the allocated memory
|
||||
* Setup minimal device state necessary for MMIO accesses later in the
|
||||
* initialization sequence. The setup here should avoid any other device-wide
|
||||
* side effects or exposing the driver via kernel internal or user space
|
||||
* interfaces.
|
||||
*/
|
||||
int i915_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv;
|
||||
struct intel_device_info *info, *device_info;
|
||||
int ret = 0;
|
||||
uint32_t aperture_size;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
int ret;
|
||||
|
||||
info = (struct intel_device_info *) flags;
|
||||
if (i915_inject_load_failure())
|
||||
return -ENODEV;
|
||||
|
||||
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
|
||||
if (dev_priv == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
dev->dev_private = dev_priv;
|
||||
dev_priv->dev = dev;
|
||||
|
||||
/* Setup the write-once "constant" device info */
|
||||
device_info = (struct intel_device_info *)&dev_priv->info;
|
||||
memcpy(device_info, info, sizeof(dev_priv->info));
|
||||
device_info->device_id = dev->pdev->device;
|
||||
|
||||
spin_lock_init(&dev_priv->irq_lock);
|
||||
spin_lock_init(&dev_priv->gpu_error.lock);
|
||||
mutex_init(&dev_priv->backlight_lock);
|
||||
spin_lock_init(&dev_priv->uncore.lock);
|
||||
spin_lock_init(&dev_priv->mm.object_stat_lock);
|
||||
spin_lock_init(&dev_priv->mmio_flip_lock);
|
||||
mutex_init(&dev_priv->sb_lock);
|
||||
mutex_init(&dev_priv->modeset_restore_lock);
|
||||
mutex_init(&dev_priv->av_mutex);
|
||||
|
||||
ret = i915_workqueues_init(dev_priv);
|
||||
if (ret < 0)
|
||||
goto out_free_priv;
|
||||
|
||||
intel_pm_setup(dev);
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
intel_display_crc_init(dev);
|
||||
|
||||
i915_dump_device_info(dev_priv);
|
||||
|
||||
/* Not all pre-production machines fall into this category, only the
|
||||
* very first ones. Almost everything should work, except for maybe
|
||||
* suspend/resume. And we don't implement workarounds that affect only
|
||||
* pre-production machines. */
|
||||
if (IS_HSW_EARLY_SDV(dev))
|
||||
DRM_INFO("This is an early pre-production Haswell machine. "
|
||||
"It may not be fully functional.\n");
|
||||
|
||||
if (i915_get_bridge_dev(dev)) {
|
||||
ret = -EIO;
|
||||
goto out_runtime_pm_put;
|
||||
}
|
||||
if (i915_get_bridge_dev(dev))
|
||||
return -EIO;
|
||||
|
||||
ret = i915_mmio_setup(dev);
|
||||
if (ret < 0)
|
||||
goto put_bridge;
|
||||
|
||||
/* This must be called before any calls to HAS_PCH_* */
|
||||
intel_detect_pch(dev);
|
||||
|
||||
intel_uncore_init(dev);
|
||||
|
||||
return 0;
|
||||
|
||||
put_bridge:
|
||||
pci_dev_put(dev_priv->bridge_dev);
|
||||
|
||||
return ret;
|
||||
}
/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_uncore_fini(dev);
	i915_mmio_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t aperture_size;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_uncore_fini;
		return ret;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
@@ -1080,26 +1213,27 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;
	aperture_size = dev_priv->ggtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
	dev_priv->ggtt.mappable =
		io_mapping_create_wc(dev_priv->ggtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
	if (dev_priv->ggtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
	dev_priv->ggtt.mtrr = arch_phys_wc_add(dev_priv->ggtt.mappable_base,
					      aperture_size);

	intel_irq_init(dev_priv);
	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev);

	intel_opregion_setup(dev);

	i915_gem_load_init(dev);
	i915_gem_shrinker_init(dev_priv);
	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
@@ -1117,24 +1251,43 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
		DRM_DEBUG_DRIVER("can't enable MSI");
	}

	intel_device_info_runtime_init(dev);
	return 0;

	intel_init_dpio(dev_priv);
out_gtt:
	i915_global_gtt_cleanup(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}
	return ret;
}

	intel_power_domains_init(dev_priv);
/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	ret = i915_load_modeset_init(dev);
	if (ret < 0) {
		DRM_ERROR("failed to init modeset\n");
		goto out_power_well;
	}
	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	arch_phys_wc_del(dev_priv->ggtt.mtrr);
	io_mapping_free(dev_priv->ggtt.mappable);
	i915_global_gtt_cleanup(dev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	i915_gem_shrinker_init(dev_priv);
	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
@@ -1144,48 +1297,107 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	i915_audio_component_init(dev_priv);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_audio_component_cleanup(dev_priv);
	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_fini(dev_priv->dev);
	i915_teardown_sysfs(dev_priv->dev);
	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	int ret = 0;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	/* Must be set before calling __i915_printk */
	dev_priv->dev = dev;

	ret = i915_driver_init_early(dev_priv, dev,
				     (struct intel_device_info *)flags);

	if (ret < 0)
		goto out_free_priv;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(dev);
	if (ret < 0)
		goto out_cleanup_vblank;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	intel_runtime_pm_put(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
out_cleanup_vblank:
	drm_vblank_cleanup(dev);
out_gem_unload:
	i915_gem_shrinker_cleanup(dev_priv);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_uncore_fini:
	intel_uncore_fini(dev);
	i915_mmio_cleanup(dev);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
	i915_gem_load_cleanup(dev);
out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_workqueues_cleanup(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_free_priv:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);

	kfree(dev_priv);

	return ret;
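
The refactored load path above follows a layered ("onion") pattern: each init phase only undoes what earlier phases set up, and registration happens last so the driver never becomes visible half-initialized. A minimal stand-alone sketch of that shape, with purely illustrative names rather than the driver's actual symbols:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's private state and phases. */
struct drv { int dummy; };

static int init_early(struct drv *d) { (void)d; return 0; }	/* locks, allocations */
static int init_mmio(struct drv *d)  { (void)d; return 0; }	/* register mapping only */
static int init_hw(struct drv *d)    { (void)d; return 0; }	/* device-touching setup */
static void cleanup_mmio(struct drv *d)  { (void)d; }
static void cleanup_early(struct drv *d) { (void)d; }
static void register_driver(struct drv *d) { (void)d; }

static int driver_load(struct drv *d)
{
	int ret;

	if ((ret = init_early(d)))
		return ret;
	if ((ret = init_mmio(d)))
		goto out_early;
	if ((ret = init_hw(d)))
		goto out_mmio;
	register_driver(d);	/* become visible only when fully set up */
	return 0;

out_mmio:
	cleanup_mmio(d);	/* unwind in strict reverse order */
out_early:
	cleanup_early(d);
	return ret;
}

int main(void)
{
	struct drv d;
	printf("load: %d\n", driver_load(&d));
	return 0;
}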
@@ -1198,26 +1410,15 @@ int i915_driver_unload(struct drm_device *dev)

	intel_fbdev_fini(dev);

	i915_audio_component_cleanup(dev_priv);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	i915_gem_shrinker_cleanup(dev_priv);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();
	i915_driver_unregister(dev_priv);

	drm_vblank_cleanup(dev);

@@ -1246,31 +1447,24 @@ int i915_driver_unload(struct drm_device *dev)
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	intel_guc_ucode_fini(dev);
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_cleanup_engines(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	intel_fbc_cleanup_cfb(dev_priv);

	pm_qos_remove_request(&dev_priv->pm_qos);
	intel_power_domains_fini(dev_priv);

	i915_global_gtt_cleanup(dev);
	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_uncore_fini(dev);
	i915_mmio_cleanup(dev);
	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	i915_gem_load_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
	i915_workqueues_cleanup(dev_priv);
	i915_driver_cleanup_early(dev_priv);
	kfree(dev_priv);

	return 0;

@@ -66,6 +66,11 @@ static struct drm_driver driver;
#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

#define BDW_COLORS \
	.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \
	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
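
The per-platform LUT sizes feed the new color manager: BDW-class hardware advertises 512-entry degamma/gamma tables, CHV 65-entry degamma and 257-entry gamma. Sizes of the form 2^n + 1 give the table an exact entry for input 1.0 instead of extrapolating the last segment; a small illustrative calculation (assumptions, not driver code):

#include <stdio.h>

/* Sketch: with 2^n + 1 entries, entry i samples input i/2^n, so the
 * table covers 0..1.0 inclusive and the endpoint is an exact entry. */
int main(void)
{
	const int lut_size = 257;	/* e.g. the CHV gamma table */

	for (int i = 0; i < lut_size; i += 64)
		printf("entry %3d samples input %f\n",
		       i, i / (double)(lut_size - 1));
	return 0;
}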

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
@@ -288,24 +293,28 @@ static const struct intel_device_info intel_haswell_m_info = {
	.is_mobile = 1,
};

#define BDW_FEATURES \
	HSW_FEATURES, \
	BDW_COLORS

static const struct intel_device_info intel_broadwell_d_info = {
	HSW_FEATURES,
	BDW_FEATURES,
	.gen = 8,
};

static const struct intel_device_info intel_broadwell_m_info = {
	HSW_FEATURES,
	BDW_FEATURES,
	.gen = 8, .is_mobile = 1,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	HSW_FEATURES,
	BDW_FEATURES,
	.gen = 8,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	HSW_FEATURES,
	BDW_FEATURES,
	.gen = 8, .is_mobile = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
@@ -318,16 +327,17 @@ static const struct intel_device_info intel_cherryview_info = {
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
	CHV_COLORS,
};

static const struct intel_device_info intel_skylake_info = {
	HSW_FEATURES,
	BDW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_skylake_gt3_info = {
	HSW_FEATURES,
	BDW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -345,17 +355,18 @@ static const struct intel_device_info intel_broxton_info = {
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
	BDW_COLORS,
};

static const struct intel_device_info intel_kabylake_info = {
	HSW_FEATURES,
	BDW_FEATURES,
	.is_preliminary = 1,
	.is_kabylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_kabylake_gt3_info = {
	HSW_FEATURES,
	BDW_FEATURES,
	.is_preliminary = 1,
	.is_kabylake = 1,
	.gen = 9,
@@ -504,6 +515,7 @@ void intel_detect_pch(struct drm_device *dev)
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    pch->subsystem_vendor == 0x1af4 &&
				    pch->subsystem_device == 0x1100)) {
@@ -758,10 +770,10 @@ static int i915_drm_resume(struct drm_device *dev)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(dev);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
@@ -881,7 +893,7 @@ int i915_reset(struct drm_device *dev)

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);
	ret = intel_gpu_reset(dev, ALL_ENGINES);

	/* Also reset the gpu hangman. */
	if (simulated) {

@@ -53,13 +53,14 @@
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include "intel_guc.h"
#include "intel_dpll_mgr.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20160229"
#define DRIVER_DATE		"20160330"

#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -97,6 +98,10 @@
#define I915_STATE_WARN_ON(x) \
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")

bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)
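
The macro records its call site, and each of the new init phases starts with an `if (i915_inject_load_failure()) return -ENODEV;` check, so a test run can walk every failure point of the load path. A stand-alone sketch of how such count-down injection might work; the counter logic and parameter here are illustrative, not the driver's actual implementation:

#include <stdio.h>

static int fail_at = 2;	/* stand-in for a module parameter: fail the n-th check */

static int inject_load_failure(const char *func, int line)
{
	static int count;

	if (fail_at == 0 || ++count != fail_at)
		return 0;
	fprintf(stderr, "injecting failure at %s:%d\n", func, line);
	return 1;
}
#define INJECT_LOAD_FAILURE() inject_load_failure(__func__, __LINE__)

static int init_phase(void)
{
	if (INJECT_LOAD_FAILURE())
		return -1;	/* mirrors returning -ENODEV in the driver */
	return 0;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("phase %d -> %d\n", i, init_phase());	/* only phase 1 fails */
	return 0;
}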

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
@@ -122,9 +127,35 @@ enum transcoder {
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	TRANSCODER_DSI_A,
	TRANSCODER_DSI_C,
	I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')

static inline const char *transcoder_name(enum transcoder transcoder)
{
	switch (transcoder) {
	case TRANSCODER_A:
		return "A";
	case TRANSCODER_B:
		return "B";
	case TRANSCODER_C:
		return "C";
	case TRANSCODER_EDP:
		return "EDP";
	case TRANSCODER_DSI_A:
		return "DSI A";
	case TRANSCODER_DSI_C:
		return "DSI C";
	default:
		return "<invalid>";
	}
}

static inline bool transcoder_is_dsi(enum transcoder transcoder)
{
	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}
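
The old `((t) + 'A')` macro only worked while transcoder ids mapped onto consecutive letters; once EDP and the BXT DSI transcoders join the enum it produces nonsense, hence the switch above. A tiny self-contained demonstration (the enum is re-declared here just for illustration):

#include <stdio.h>

enum transcoder {
	TRANSCODER_A, TRANSCODER_B, TRANSCODER_C,
	TRANSCODER_EDP, TRANSCODER_DSI_A, TRANSCODER_DSI_C,
};

int main(void)
{
	/* Old macro: fine for A..C, wrong from EDP onwards. */
	printf("%c %c\n", TRANSCODER_A + 'A', TRANSCODER_EDP + 'A');	/* "A D" */
	return 0;
}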

/*
 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
@@ -176,6 +207,8 @@ enum intel_display_power_domain {
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_TRANSCODER_DSI_A,
	POWER_DOMAIN_TRANSCODER_DSI_C,
	POWER_DOMAIN_PORT_DDI_A_LANES,
	POWER_DOMAIN_PORT_DDI_B_LANES,
	POWER_DOMAIN_PORT_DDI_C_LANES,
@@ -273,6 +306,10 @@ struct i915_hotplug {
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
	     (__s)++)

#define for_each_port_masked(__port, __ports_mask) \
	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
		for_each_if ((__ports_mask) & (1 << (__port)))

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)

@@ -340,81 +377,6 @@ struct drm_i915_file_private {
	unsigned int bsd_ring;
};

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A = 0,
	DPLL_ID_PCH_PLL_B = 1,
	/* hsw/bdw */
	DPLL_ID_WRPLL1 = 0,
	DPLL_ID_WRPLL2 = 1,
	DPLL_ID_SPLL = 2,

	/* skl */
	DPLL_ID_SKL_DPLL1 = 0,
	DPLL_ID_SKL_DPLL2 = 1,
	DPLL_ID_SKL_DPLL3 = 2,
};
#define I915_NUM_PLLS 3

struct intel_dpll_hw_state {
	/* i9xx, pch plls */
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;

	/* hsw, bdw */
	uint32_t wrpll;
	uint32_t spll;

	/* skl */
	/*
	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in
	 * lower part of ctrl1 and they get shifted into position when writing
	 * the register. This allows us to easily compare the state to share
	 * the DPLL.
	 */
	uint32_t ctrl1;
	/* HDMI only, 0 when used for DP */
	uint32_t cfgcr1, cfgcr2;

	/* bxt */
	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
		 pcsdw12;
};

struct intel_shared_dpll_config {
	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
	struct intel_dpll_hw_state hw_state;
};

struct intel_shared_dpll {
	struct intel_shared_dpll_config config;

	int active; /* number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	/* The mode_set hook is optional and should be used together with the
	 * intel_prepare_shared_dpll function. */
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

#define SKL_DPLL0 0
#define SKL_DPLL1 1
#define SKL_DPLL2 2
#define SKL_DPLL3 3

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
@@ -533,7 +495,7 @@ struct drm_i915_error_state {
	u32 cpu_ring_head;
	u32 cpu_ring_tail;

	u32 semaphore_seqno[I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_ENGINES - 1];

	/* Register state */
	u32 start;
@@ -553,7 +515,7 @@ struct drm_i915_error_state {
	u32 fault_reg;
	u64 faddr;
	u32 rc_psmi; /* sleep state */
	u32 semaphore_mboxes[I915_NUM_RINGS - 1];
	u32 semaphore_mboxes[I915_NUM_ENGINES - 1];

	struct drm_i915_error_object {
		int page_count;
@@ -561,6 +523,8 @@ struct drm_i915_error_state {
		u32 *pages[0];
	} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

	struct drm_i915_error_object *wa_ctx;

	struct drm_i915_error_request {
		long jiffies;
		u32 seqno;
@@ -577,12 +541,12 @@ struct drm_i915_error_state {

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_RINGS];
	} ring[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_RINGS], wseqno;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
@@ -611,27 +575,12 @@ struct dpll;

struct drm_i915_display_funcs {
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct intel_crtc_state *crtc_state,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	int (*compute_pipe_wm)(struct intel_crtc *crtc,
			       struct drm_atomic_state *state);
	void (*program_watermarks)(struct intel_crtc_state *cstate);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_crtc_state *cstate);
	void (*update_wm)(struct drm_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -662,6 +611,9 @@ struct drm_i915_display_funcs {
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	void (*load_csc_matrix)(struct drm_crtc *crtc);
	void (*load_luts)(struct drm_crtc *crtc);
};

enum forcewake_domain_id {
@@ -750,6 +702,7 @@ struct intel_csr {
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
	uint32_t allowed_dc_mask;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -779,6 +732,7 @@ struct intel_csr {
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_snoop) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

@@ -810,6 +764,11 @@ struct intel_device_info {
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;

	struct color_luts {
		u16 degamma_lut_size;
		u16 gamma_lut_size;
	} color;
};

#undef DEFINE_FLAG
@@ -891,7 +850,7 @@ struct intel_context {
		struct i915_vma *lrc_vma;
		u64 lrc_desc;
		uint32_t *lrc_reg_state;
	} engine[I915_NUM_RINGS];
	} engine[I915_NUM_ENGINES];

	struct list_head link;
};
@@ -1482,21 +1441,22 @@ struct intel_vbt_data {
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int has_mipi:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;
	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		bool support;
		int bpp;
		struct edp_power_seq pps;
	} edp;

	struct {
		bool full_link;
@@ -1516,7 +1476,6 @@ struct intel_vbt_data {

	/* MIPI DSI */
	struct {
		u16 port;
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
@@ -1532,6 +1491,7 @@ struct intel_vbt_data {
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
	struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
@@ -1706,7 +1666,7 @@ struct i915_wa_reg {
struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
	u32 hw_whitelist_count[I915_NUM_RINGS];
	u32 hw_whitelist_count[I915_NUM_ENGINES];
};

struct i915_virtual_gpu {
@@ -1719,7 +1679,7 @@ struct i915_execbuffer_params {
	uint32_t dispatch_flags;
	uint32_t args_batch_start_offset;
	uint64_t batch_obj_vm_offset;
	struct intel_engine_cs *ring;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch_obj;
	struct intel_context *ctx;
	struct drm_i915_gem_request *request;
@@ -1771,7 +1731,7 @@ struct drm_i915_private {
	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs ring[I915_NUM_RINGS];
	struct intel_engine_cs engine[I915_NUM_ENGINES];
	struct drm_i915_gem_object *semaphore_obj;
	uint32_t last_seqno, next_seqno;

@@ -1829,6 +1789,7 @@ struct drm_i915_private {
	unsigned int skl_boot_cdclk;
	unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
	unsigned int max_dotclk_freq;
	unsigned int rawclk_freq;
	unsigned int hpll_freq;
	unsigned int czclk_freq;

@@ -1855,7 +1816,7 @@ struct drm_i915_private {
	struct drm_atomic_state *modeset_restore_state;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VM representing the global address space */
	struct i915_ggtt ggtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
@@ -1863,8 +1824,6 @@ struct drm_i915_private {

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;
@@ -1876,6 +1835,7 @@ struct drm_i915_private {
	/* dpll and cdclk state is protected by connection_mutex */
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	const struct intel_dpll_mgr *dpll_mgr;

	unsigned int active_crtcs;
	unsigned int min_pixclk[I915_MAX_PIPES];
@@ -1884,9 +1844,6 @@ struct drm_i915_private {

	struct i915_workarounds workarounds;

	/* Reclocking support */
	bool render_reclock_avail;

	struct i915_frontbuffer_tracking fb_tracking;

	u16 orig_clock;
@@ -1980,6 +1937,13 @@ struct drm_i915_private {
		};

		uint8_t max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * cstate->wm.need_postvbl_update.
		 */
		struct mutex wm_mutex;
	} wm;

	struct i915_runtime_pm pm;
@@ -1989,15 +1953,13 @@ struct drm_i915_private {
		int (*execbuf_submit)(struct i915_execbuffer_params *params,
				      struct drm_i915_gem_execbuffer2 *args,
				      struct list_head *vmas);
		int (*init_rings)(struct drm_device *dev);
		void (*cleanup_ring)(struct intel_engine_cs *ring);
		void (*stop_ring)(struct intel_engine_cs *ring);
		int (*init_engines)(struct drm_device *dev);
		void (*cleanup_engine)(struct intel_engine_cs *engine);
		void (*stop_engine)(struct intel_engine_cs *engine);
	} gt;

	struct intel_context *kernel_context;

	bool edp_low_vswing;

	/* perform PHY state sanity checks? */
	bool chv_phy_assert[2];
@@ -2024,10 +1986,28 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
	return container_of(guc, struct drm_i915_private, guc);
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))))
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__) \
	for ((engine__) = &(dev_priv__)->engine[0]; \
	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
	     (engine__)++) \
		for_each_if (intel_engine_initialized(engine__))

/* Iterator with engine_id */
#define for_each_engine_id(engine__, dev_priv__, id__) \
	for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
	     (engine__)++) \
		for_each_if (((id__) = (engine__)->id, \
			      intel_engine_initialized(engine__)))

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__) \
	for ((engine__) = &(dev_priv__)->engine[0]; \
	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
	     (engine__)++) \
		for_each_if (((mask__) & intel_engine_flag(engine__)) && \
			     intel_engine_initialized(engine__))
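
The new iterators walk the engine array by pointer and drop the explicit index of the old for_each_ring(). A stand-alone sketch of the same shape, with stub structs and init flags that are illustrative only, not i915's:

#include <stdio.h>
#include <stdbool.h>

#define NUM_ENGINES 4

struct engine { int id; bool initialized; };
struct priv { struct engine engine[NUM_ENGINES]; };

/* Same trick the kernel's for_each_if uses to filter inside a for loop. */
#define for_each_if(cond) if (!(cond)) {} else

#define for_each_engine(e, p) \
	for ((e) = &(p)->engine[0]; (e) < &(p)->engine[NUM_ENGINES]; (e)++) \
		for_each_if((e)->initialized)

int main(void)
{
	struct priv p = { .engine = { {0, true}, {1, false}, {2, true}, {3, true} } };
	struct engine *e;

	for_each_engine(e, &p)
		printf("engine %d\n", e->id);	/* prints 0, 2, 3 */
	return 0;
}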
|
||||
|
||||
enum hdmi_force_audio {
|
||||
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
|
||||
@ -2097,7 +2077,7 @@ struct drm_i915_gem_object {
|
||||
struct drm_mm_node *stolen;
|
||||
struct list_head global_list;
|
||||
|
||||
struct list_head ring_list[I915_NUM_RINGS];
|
||||
struct list_head engine_list[I915_NUM_ENGINES];
|
||||
/** Used in execbuf to temporarily hold a ref */
|
||||
struct list_head obj_exec_link;
|
||||
|
||||
@ -2108,7 +2088,7 @@ struct drm_i915_gem_object {
|
||||
* rendering and so a non-zero seqno), and is not set if it i s on
|
||||
* inactive (ready to be unbound) list.
|
||||
*/
|
||||
unsigned int active:I915_NUM_RINGS;
|
||||
unsigned int active:I915_NUM_ENGINES;
|
||||
|
||||
/**
|
||||
* This is set if the object has been written to since last bound
|
||||
@ -2187,7 +2167,7 @@ struct drm_i915_gem_object {
|
||||
* read request. This allows for the CPU to read from an active
|
||||
* buffer by only waiting for the write to complete.
|
||||
* */
|
||||
struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
|
||||
struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
|
||||
struct drm_i915_gem_request *last_write_req;
|
||||
/** Breadcrumb of last fenced GPU access to the buffer. */
|
||||
struct drm_i915_gem_request *last_fenced_req;
|
||||
@ -2242,7 +2222,7 @@ struct drm_i915_gem_request {
|
||||
|
||||
/** On Which ring this request was generated */
|
||||
struct drm_i915_private *i915;
|
||||
struct intel_engine_cs *ring;
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
/** GEM sequence number associated with the previous request,
|
||||
* when the HWS breadcrumb is equal to this the GPU is processing
|
||||
@ -2335,9 +2315,9 @@ i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
|
||||
}
|
||||
|
||||
static inline struct intel_engine_cs *
|
||||
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
|
||||
i915_gem_request_get_engine(struct drm_i915_gem_request *req)
|
||||
{
|
||||
return req ? req->ring : NULL;
|
||||
return req ? req->engine : NULL;
|
||||
}
|
||||
|
||||
static inline struct drm_i915_gem_request *
|
||||
@ -2351,7 +2331,7 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
|
||||
static inline void
|
||||
i915_gem_request_unreference(struct drm_i915_gem_request *req)
|
||||
{
|
||||
WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
|
||||
WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
|
||||
kref_put(&req->ref, i915_gem_request_free);
|
||||
}
|
||||
|
||||
@ -2363,7 +2343,7 @@ i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
|
||||
if (!req)
|
||||
return;
|
||||
|
||||
dev = req->ring->dev;
|
||||
dev = req->engine->dev;
|
||||
if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
@ -2611,11 +2591,14 @@ struct drm_i915_cmd_table {
|
||||
#define BLT_RING (1<<BCS)
|
||||
#define VEBOX_RING (1<<VECS)
|
||||
#define BSD2_RING (1<<VCS2)
|
||||
#define ALL_ENGINES (~0)
|
||||
|
||||
#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
|
||||
#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING)
|
||||
#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
|
||||
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
|
||||
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
|
||||
#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
|
||||
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
|
||||
__I915__(dev)->ellc_size)
|
||||
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
|
||||
@ -2695,6 +2678,7 @@ struct drm_i915_cmd_table {
|
||||
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
|
||||
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
|
||||
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
|
||||
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
|
||||
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
|
||||
|
||||
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
|
||||
@ -2726,6 +2710,13 @@ extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
|
||||
extern int i915_resume_switcheroo(struct drm_device *dev);
|
||||
|
||||
/* i915_dma.c */
|
||||
void __printf(3, 4)
|
||||
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
|
||||
const char *fmt, ...);
|
||||
|
||||
#define i915_report_error(dev_priv, fmt, ...) \
|
||||
__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
|
||||
|
||||
extern int i915_driver_load(struct drm_device *, unsigned long flags);
|
||||
extern int i915_driver_unload(struct drm_device *);
|
||||
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
|
||||
@ -2738,9 +2729,10 @@ extern void i915_driver_postclose(struct drm_device *dev,
|
||||
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
|
||||
unsigned long arg);
|
||||
#endif
|
||||
extern int intel_gpu_reset(struct drm_device *dev);
|
||||
extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
|
||||
extern bool intel_has_gpu_reset(struct drm_device *dev);
|
||||
extern int i915_reset(struct drm_device *dev);
|
||||
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
|
||||
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
|
||||
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
|
||||
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
|
||||
@ -2757,7 +2749,7 @@ bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
|
||||
/* i915_irq.c */
|
||||
void i915_queue_hangcheck(struct drm_device *dev);
|
||||
__printf(3, 4)
|
||||
void i915_handle_error(struct drm_device *dev, bool wedged,
|
||||
void i915_handle_error(struct drm_device *dev, u32 engine_mask,
|
||||
const char *fmt, ...);
|
||||
|
||||
extern void intel_irq_init(struct drm_i915_private *dev_priv);
|
||||
@ -2893,6 +2885,7 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
void i915_gem_load_init(struct drm_device *dev);
|
||||
void i915_gem_load_cleanup(struct drm_device *dev);
|
||||
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
|
||||
void *i915_gem_object_alloc(struct drm_device *dev);
|
||||
void i915_gem_object_free(struct drm_i915_gem_object *obj);
|
||||
void i915_gem_object_init(struct drm_i915_gem_object *obj,
|
||||
@ -3006,14 +2999,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
|
||||
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
|
||||
bool lazy_coherency)
|
||||
{
|
||||
u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
|
||||
u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency);
|
||||
return i915_seqno_passed(seqno, req->previous_seqno);
|
||||
}
|
||||
|
||||
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
|
||||
bool lazy_coherency)
|
||||
{
|
||||
u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
|
||||
u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency);
|
||||
return i915_seqno_passed(seqno, req->seqno);
|
||||
}
|
||||
|
||||
@ -3021,10 +3014,10 @@ int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
|
||||
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
|
||||
|
||||
struct drm_i915_gem_request *
|
||||
i915_gem_find_active_request(struct intel_engine_cs *ring);
|
||||
i915_gem_find_active_request(struct intel_engine_cs *engine);
|
||||
|
||||
bool i915_gem_retire_requests(struct drm_device *dev);
|
||||
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
|
||||
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
|
||||
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
|
||||
bool interruptible);
|
||||
|
||||
@ -3059,11 +3052,11 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
|
||||
void i915_gem_reset(struct drm_device *dev);
|
||||
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
|
||||
int __must_check i915_gem_init(struct drm_device *dev);
|
||||
int i915_gem_init_rings(struct drm_device *dev);
|
||||
int i915_gem_init_engines(struct drm_device *dev);
|
||||
int __must_check i915_gem_init_hw(struct drm_device *dev);
|
||||
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
|
||||
void i915_gem_init_swizzling(struct drm_device *dev);
|
||||
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
|
||||
void i915_gem_cleanup_engines(struct drm_device *dev);
|
||||
int __must_check i915_gpu_idle(struct drm_device *dev);
|
||||
int __must_check i915_gem_suspend(struct drm_device *dev);
|
||||
void __i915_add_request(struct drm_i915_gem_request *req,
|
||||
@ -3155,7 +3148,7 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
|
||||
|
||||
/* Some GGTT VM helpers */
|
||||
#define i915_obj_to_ggtt(obj) \
|
||||
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
|
||||
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
|
||||
|
||||
static inline struct i915_hw_ppgtt *
|
||||
i915_vm_to_ppgtt(struct i915_address_space *vm)
|
||||
@ -3342,7 +3335,7 @@ static inline void i915_error_state_buf_release(
|
||||
{
|
||||
kfree(eb->buf);
|
||||
}
|
||||
void i915_capture_error_state(struct drm_device *dev, bool wedge,
|
||||
void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
|
||||
const char *error_msg);
|
||||
void i915_error_state_get(struct drm_device *dev,
|
||||
struct i915_error_state_file_priv *error_priv);
|
||||
@ -3354,10 +3347,10 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
|
||||
|
||||
/* i915_cmd_parser.c */
|
||||
int i915_cmd_parser_get_version(void);
|
||||
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
|
||||
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
|
||||
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
|
||||
int i915_parse_cmds(struct intel_engine_cs *ring,
|
||||
int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
|
||||
void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
|
||||
bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
|
||||
int i915_parse_cmds(struct intel_engine_cs *engine,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
struct drm_i915_gem_object *shadow_batch_obj,
|
||||
u32 batch_start_offset,
|
||||
@ -3391,6 +3384,10 @@ extern void intel_i2c_reset(struct drm_device *dev);
|
||||
/* intel_bios.c */
|
||||
int intel_bios_init(struct drm_i915_private *dev_priv);
|
||||
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
|
||||
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
|
||||
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
|
||||
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
|
||||
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
|
||||
|
||||
/* intel_opregion.c */
|
||||
#ifdef CONFIG_ACPI
|
||||
@ -3628,11 +3625,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
|
||||
}
|
||||
}
|
||||
|
||||
static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
|
||||
static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
if (ring->trace_irq_req == NULL && ring->irq_get(ring))
|
||||
i915_gem_request_assign(&ring->trace_irq_req, req);
|
||||
if (engine->trace_irq_req == NULL && engine->irq_get(engine))
|
||||
i915_gem_request_assign(&engine->trace_irq_req, req);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -132,7 +132,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_gem_get_aperture *args = data;
|
||||
struct i915_gtt *ggtt = &dev_priv->gtt;
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
struct i915_vma *vma;
|
||||
size_t pinned;
|
||||
|
||||
@ -146,7 +146,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
|
||||
pinned += vma->node.size;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
args->aper_size = dev_priv->gtt.base.total;
|
||||
args->aper_size = dev_priv->ggtt.base.total;
|
||||
args->aper_available_size = args->aper_size - pinned;
|
||||
|
||||
return 0;
|
||||
@ -807,7 +807,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
|
||||
* source page isn't available. Return the error and we'll
|
||||
* retry in the slow path.
|
||||
*/
|
||||
if (fast_user_write(dev_priv->gtt.mappable, page_base,
|
||||
if (fast_user_write(dev_priv->ggtt.mappable, page_base,
|
||||
page_offset, user_data, page_length)) {
|
||||
ret = -EFAULT;
|
||||
goto out_flush;
|
||||
@ -1141,9 +1141,9 @@ static void fake_irq(unsigned long data)
|
||||
}
|
||||
|
||||
static bool missed_irq(struct drm_i915_private *dev_priv,
|
||||
struct intel_engine_cs *ring)
|
||||
struct intel_engine_cs *engine)
|
||||
{
|
||||
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
|
||||
return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
|
||||
}
|
||||
|
||||
static unsigned long local_clock_us(unsigned *cpu)
|
||||
@ -1193,7 +1193,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
|
||||
* takes to sleep on a request, on the order of a microsecond.
|
||||
*/
|
||||
|
||||
if (req->ring->irq_refcount)
|
||||
if (req->engine->irq_refcount)
|
||||
return -EBUSY;
|
||||
|
||||
/* Only spin if we know the GPU is processing this request */
|
||||
@ -1243,11 +1243,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
|
||||
s64 *timeout,
|
||||
struct intel_rps_client *rps)
|
||||
{
|
||||
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
|
||||
struct drm_device *dev = engine->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
const bool irq_test_in_progress =
|
||||
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
|
||||
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
|
||||
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
|
||||
DEFINE_WAIT(wait);
|
||||
unsigned long timeout_expire;
|
||||
@ -1288,7 +1288,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
|
||||
if (ret == 0)
|
||||
goto out;
|
||||
|
||||
if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
|
||||
if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
|
||||
ret = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
@ -1296,7 +1296,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
|
||||
for (;;) {
|
||||
struct timer_list timer;
|
||||
|
||||
prepare_to_wait(&ring->irq_queue, &wait, state);
|
||||
prepare_to_wait(&engine->irq_queue, &wait, state);
|
||||
|
||||
/* We need to check whether any gpu reset happened in between
|
||||
* the caller grabbing the seqno and now ... */
|
||||
@ -1325,11 +1325,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
|
||||
}
|
||||
|
||||
timer.function = NULL;
|
||||
if (timeout || missed_irq(dev_priv, ring)) {
|
||||
if (timeout || missed_irq(dev_priv, engine)) {
|
||||
unsigned long expire;
|
||||
|
||||
setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
|
||||
expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
|
||||
expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
|
||||
mod_timer(&timer, expire);
|
||||
}
|
||||
|
||||
@ -1341,9 +1341,9 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
|
||||
}
|
||||
}
|
||||
if (!irq_test_in_progress)
|
||||
ring->irq_put(ring);
|
||||
engine->irq_put(engine);
|
||||
|
||||
finish_wait(&ring->irq_queue, &wait);
|
||||
finish_wait(&engine->irq_queue, &wait);
|
||||
|
||||
out:
|
||||
trace_i915_gem_request_wait_end(req);
|
||||
@ -1370,7 +1370,6 @@ out:
|
||||
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_private *dev_private;
|
||||
struct drm_i915_file_private *file_priv;
|
||||
|
||||
WARN_ON(!req || !file || req->file_priv);
|
||||
@ -1381,7 +1380,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
|
||||
if (req->file_priv)
|
||||
return -EINVAL;
|
||||
|
||||
dev_private = req->ring->dev->dev_private;
|
||||
file_priv = file->driver_priv;
|
||||
|
||||
spin_lock(&file_priv->mm.lock);
|
||||
@ -1434,7 +1432,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
|
||||
static void
|
||||
__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_gem_request *tmp;
|
||||
|
||||
lockdep_assert_held(&engine->dev->struct_mutex);
|
||||
@ -1466,7 +1464,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
|
||||
|
||||
BUG_ON(req == NULL);
|
||||
|
||||
dev = req->ring->dev;
|
||||
dev = req->engine->dev;
|
||||
dev_priv = dev->dev_private;
|
||||
interruptible = dev_priv->mm.interruptible;
|
||||
|
||||
@ -1505,14 +1503,14 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i = obj->last_write_req->ring->id;
|
||||
i = obj->last_write_req->engine->id;
|
||||
if (obj->last_read_req[i] == obj->last_write_req)
|
||||
i915_gem_object_retire__read(obj, i);
|
||||
else
|
||||
i915_gem_object_retire__write(obj);
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < I915_NUM_RINGS; i++) {
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
if (obj->last_read_req[i] == NULL)
|
||||
continue;
|
||||
|
||||
@ -1532,7 +1530,7 @@ static void
|
||||
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
int ring = req->ring->id;
|
||||
int ring = req->engine->id;
|
||||
|
||||
if (obj->last_read_req[ring] == req)
|
||||
i915_gem_object_retire__read(obj, ring);
|
||||
@ -1552,7 +1550,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_gem_request *requests[I915_NUM_RINGS];
|
||||
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
|
||||
unsigned reset_counter;
|
||||
int ret, i, n = 0;
|
||||
|
||||
@ -1577,7 +1575,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
|
||||
|
||||
requests[n++] = i915_gem_request_reference(req);
|
||||
} else {
|
||||
for (i = 0; i < I915_NUM_RINGS; i++) {
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
struct drm_i915_gem_request *req;
|
||||
|
||||
req = obj->last_read_req[i];
|
||||
@ -1827,7 +1825,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
}
|
||||
|
||||
/* Use a partial view if the object is bigger than the aperture. */
|
||||
if (obj->base.size >= dev_priv->gtt.mappable_end &&
|
||||
if (obj->base.size >= dev_priv->ggtt.mappable_end &&
|
||||
obj->tiling_mode == I915_TILING_NONE) {
|
||||
static const unsigned int chunk_size = 256; // 1 MiB
|
||||
|
||||
@ -1855,7 +1853,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
goto unpin;
|
||||
|
||||
/* Finally, remap it using the new GTT offset */
|
||||
pfn = dev_priv->gtt.mappable_base +
|
||||
pfn = dev_priv->ggtt.mappable_base +
|
||||
i915_gem_obj_ggtt_offset_view(obj, &view);
|
||||
pfn >>= PAGE_SHIFT;
|
||||
|
||||
@ -2404,17 +2402,17 @@ void i915_vma_move_to_active(struct i915_vma *vma,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
struct intel_engine_cs *ring;
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
ring = i915_gem_request_get_ring(req);
|
||||
engine = i915_gem_request_get_engine(req);
|
||||
|
||||
/* Add a reference if we're newly entering the active list. */
|
||||
if (obj->active == 0)
|
||||
drm_gem_object_reference(&obj->base);
|
||||
obj->active |= intel_ring_flag(ring);
|
||||
obj->active |= intel_engine_flag(engine);
|
||||
|
||||
list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
|
||||
i915_gem_request_assign(&obj->last_read_req[ring->id], req);
|
||||
list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
|
||||
i915_gem_request_assign(&obj->last_read_req[engine->id], req);
|
||||
|
||||
list_move_tail(&vma->vm_link, &vma->vm->active_list);
|
||||
}
|
||||
@ -2423,7 +2421,7 @@ static void
|
||||
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
RQ_BUG_ON(obj->last_write_req == NULL);
|
||||
RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
|
||||
RQ_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
|
||||
|
||||
i915_gem_request_assign(&obj->last_write_req, NULL);
|
||||
intel_fb_obj_flush(obj, true, ORIGIN_CS);
|
||||
@ -2437,10 +2435,10 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
|
||||
RQ_BUG_ON(obj->last_read_req[ring] == NULL);
|
||||
RQ_BUG_ON(!(obj->active & (1 << ring)));
|
||||
|
||||
list_del_init(&obj->ring_list[ring]);
|
||||
list_del_init(&obj->engine_list[ring]);
|
||||
i915_gem_request_assign(&obj->last_read_req[ring], NULL);
|
||||
|
||||
if (obj->last_write_req && obj->last_write_req->ring->id == ring)
|
||||
if (obj->last_write_req && obj->last_write_req->engine->id == ring)
|
||||
i915_gem_object_retire__write(obj);
|
||||
|
||||
obj->active &= ~(1 << ring);
|
||||
@@ -2467,23 +2465,23 @@ static int
 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int ret, i, j;
+	struct intel_engine_cs *engine;
+	int ret, j;

 	/* Carefully retire all requests without writing to the rings */
-	for_each_ring(ring, dev_priv, i) {
-		ret = intel_ring_idle(ring);
+	for_each_engine(engine, dev_priv) {
+		ret = intel_engine_idle(engine);
 		if (ret)
 			return ret;
 	}
 	i915_gem_retire_requests(dev);

 	/* Finally reset hw state */
-	for_each_ring(ring, dev_priv, i) {
-		intel_ring_init_seqno(ring, seqno);
+	for_each_engine(engine, dev_priv) {
+		intel_ring_init_seqno(engine, seqno);

-		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
-			ring->semaphore.sync_seqno[j] = 0;
+		for (j = 0; j < ARRAY_SIZE(engine->semaphore.sync_seqno); j++)
+			engine->semaphore.sync_seqno[j] = 0;
 	}

 	return 0;
@@ -2542,7 +2540,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 			struct drm_i915_gem_object *obj,
 			bool flush_caches)
 {
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
@@ -2551,8 +2549,8 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	if (WARN_ON(request == NULL))
 		return;

-	ring = request->ring;
-	dev_priv = ring->dev->dev_private;
+	engine = request->engine;
+	dev_priv = request->i915;
 	ringbuf = request->ringbuf;

 	/*
@@ -2587,9 +2585,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	request->postfix = intel_ring_get_tail(ringbuf);

 	if (i915.enable_execlists)
-		ret = ring->emit_request(request);
+		ret = engine->emit_request(request);
 	else {
-		ret = ring->add_request(request);
+		ret = engine->add_request(request);

 		request->tail = intel_ring_get_tail(ringbuf);
 	}
@@ -2607,13 +2605,13 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	request->batch_obj = obj;

 	request->emitted_jiffies = jiffies;
-	request->previous_seqno = ring->last_submitted_seqno;
-	ring->last_submitted_seqno = request->seqno;
-	list_add_tail(&request->list, &ring->request_list);
+	request->previous_seqno = engine->last_submitted_seqno;
+	engine->last_submitted_seqno = request->seqno;
+	list_add_tail(&request->list, &engine->request_list);

 	trace_i915_gem_request_add(request);

-	i915_queue_hangcheck(ring->dev);
+	i915_queue_hangcheck(engine->dev);

 	queue_delayed_work(dev_priv->wq,
 			   &dev_priv->mm.retire_work,
@@ -2680,7 +2678,7 @@ void i915_gem_request_free(struct kref *req_ref)

 	if (ctx) {
 		if (i915.enable_execlists && ctx != req->i915->kernel_context)
-			intel_lr_context_unpin(ctx, req->ring);
+			intel_lr_context_unpin(ctx, req->engine);

 		i915_gem_context_unreference(ctx);
 	}
@@ -2689,11 +2687,11 @@ void i915_gem_request_free(struct kref *req_ref)
 }

 static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *ring,
+__i915_gem_request_alloc(struct intel_engine_cs *engine,
 			 struct intel_context *ctx,
 			 struct drm_i915_gem_request **req_out)
 {
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine->dev);
 	struct drm_i915_gem_request *req;
 	int ret;

@@ -2706,13 +2704,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring,
 	if (req == NULL)
 		return -ENOMEM;

-	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
 	if (ret)
 		goto err;

 	kref_init(&req->ref);
 	req->i915 = dev_priv;
-	req->ring = ring;
+	req->engine = engine;
 	req->ctx  = ctx;
 	i915_gem_context_reference(req->ctx);

@@ -2787,11 +2785,11 @@ void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 }

 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring)
+i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;

-	list_for_each_entry(request, &ring->request_list, list) {
+	list_for_each_entry(request, &engine->request_list, list) {
 		if (i915_gem_request_completed(request, false))
 			continue;

@@ -2801,38 +2799,38 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
 	return NULL;
 }

-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-				       struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
+					 struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 	bool ring_hung;

-	request = i915_gem_find_active_request(ring);
+	request = i915_gem_find_active_request(engine);

 	if (request == NULL)
 		return;

-	ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

 	i915_set_reset_status(dev_priv, request->ctx, ring_hung);

-	list_for_each_entry_continue(request, &ring->request_list, list)
+	list_for_each_entry_continue(request, &engine->request_list, list)
 		i915_set_reset_status(dev_priv, request->ctx, false);
 }

-static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
-					struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
+					  struct intel_engine_cs *engine)
 {
 	struct intel_ringbuffer *buffer;

-	while (!list_empty(&ring->active_list)) {
+	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;

-		obj = list_first_entry(&ring->active_list,
+		obj = list_first_entry(&engine->active_list,
 				       struct drm_i915_gem_object,
-				       ring_list[ring->id]);
+				       engine_list[engine->id]);

-		i915_gem_object_retire__read(obj, ring->id);
+		i915_gem_object_retire__read(obj, engine->id);
 	}

 	/*
@@ -2842,14 +2840,14 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 */

 	if (i915.enable_execlists) {
-		spin_lock_irq(&ring->execlist_lock);
+		spin_lock_irq(&engine->execlist_lock);

 		/* list_splice_tail_init checks for empty lists */
-		list_splice_tail_init(&ring->execlist_queue,
-				      &ring->execlist_retired_req_list);
+		list_splice_tail_init(&engine->execlist_queue,
+				      &engine->execlist_retired_req_list);

-		spin_unlock_irq(&ring->execlist_lock);
-		intel_execlists_retire_requests(ring);
+		spin_unlock_irq(&engine->execlist_lock);
+		intel_execlists_retire_requests(engine);
 	}

 	/*
@@ -2859,10 +2857,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 * implicit references on things like e.g. ppgtt address spaces through
 	 * the request.
 	 */
-	while (!list_empty(&ring->request_list)) {
+	while (!list_empty(&engine->request_list)) {
 		struct drm_i915_gem_request *request;

-		request = list_first_entry(&ring->request_list,
+		request = list_first_entry(&engine->request_list,
 					   struct drm_i915_gem_request,
 					   list);

@@ -2876,7 +2874,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 * upon reset is less than when we start. Do one more pass over
 	 * all the ringbuffers to reset last_retired_head.
 	 */
-	list_for_each_entry(buffer, &ring->buffers, link) {
+	list_for_each_entry(buffer, &engine->buffers, link) {
 		buffer->last_retired_head = buffer->tail;
 		intel_ring_update_space(buffer);
 	}
@@ -2885,19 +2883,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;

 	/*
 	 * Before we free the objects from the requests, we need to inspect
 	 * them for finding the guilty party. As the requests only borrow
 	 * their reference to the objects, the inspection must be done first.
 	 */
-	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_status(dev_priv, ring);
+	for_each_engine(engine, dev_priv)
+		i915_gem_reset_engine_status(dev_priv, engine);

-	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_cleanup(dev_priv, ring);
+	for_each_engine(engine, dev_priv)
+		i915_gem_reset_engine_cleanup(dev_priv, engine);

 	i915_gem_context_reset(dev);

@@ -2910,19 +2907,19 @@ void i915_gem_reset(struct drm_device *dev)
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 {
-	WARN_ON(i915_verify_lists(ring->dev));
+	WARN_ON(i915_verify_lists(engine->dev));

 	/* Retire requests first as we use it above for the early return.
 	 * If we retire requests last, we may use a later seqno and so clear
 	 * the requests lists without clearing the active list, leading to
 	 * confusion.
 	 */
-	while (!list_empty(&ring->request_list)) {
+	while (!list_empty(&engine->request_list)) {
 		struct drm_i915_gem_request *request;

-		request = list_first_entry(&ring->request_list,
+		request = list_first_entry(&engine->request_list,
 					   struct drm_i915_gem_request,
 					   list);

@@ -2936,45 +2933,44 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	 * by the ringbuffer to the flushing/inactive lists as appropriate,
 	 * before we free the context associated with the requests.
 	 */
-	while (!list_empty(&ring->active_list)) {
+	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;

-		obj = list_first_entry(&ring->active_list,
-				       struct drm_i915_gem_object,
-				       ring_list[ring->id]);
+		obj = list_first_entry(&engine->active_list,
+				       struct drm_i915_gem_object,
+				       engine_list[engine->id]);

-		if (!list_empty(&obj->last_read_req[ring->id]->list))
+		if (!list_empty(&obj->last_read_req[engine->id]->list))
 			break;

-		i915_gem_object_retire__read(obj, ring->id);
+		i915_gem_object_retire__read(obj, engine->id);
 	}

-	if (unlikely(ring->trace_irq_req &&
-		     i915_gem_request_completed(ring->trace_irq_req, true))) {
-		ring->irq_put(ring);
-		i915_gem_request_assign(&ring->trace_irq_req, NULL);
+	if (unlikely(engine->trace_irq_req &&
+		     i915_gem_request_completed(engine->trace_irq_req, true))) {
+		engine->irq_put(engine);
+		i915_gem_request_assign(&engine->trace_irq_req, NULL);
 	}

-	WARN_ON(i915_verify_lists(ring->dev));
+	WARN_ON(i915_verify_lists(engine->dev));
 }

 bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	bool idle = true;
-	int i;

-	for_each_ring(ring, dev_priv, i) {
-		i915_gem_retire_requests_ring(ring);
-		idle &= list_empty(&ring->request_list);
+	for_each_engine(engine, dev_priv) {
+		i915_gem_retire_requests_ring(engine);
+		idle &= list_empty(&engine->request_list);
 		if (i915.enable_execlists) {
-			spin_lock_irq(&ring->execlist_lock);
-			idle &= list_empty(&ring->execlist_queue);
-			spin_unlock_irq(&ring->execlist_lock);
+			spin_lock_irq(&engine->execlist_lock);
+			idle &= list_empty(&engine->execlist_queue);
+			spin_unlock_irq(&engine->execlist_lock);

-			intel_execlists_retire_requests(ring);
+			intel_execlists_retire_requests(engine);
 		}
 	}

@@ -3011,25 +3007,21 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv), mm.idle_work.work);
 	struct drm_device *dev = dev_priv->dev;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;

-	for_each_ring(ring, dev_priv, i)
-		if (!list_empty(&ring->request_list))
+	for_each_engine(engine, dev_priv)
+		if (!list_empty(&engine->request_list))
 			return;

 	/* we probably should sync with hangcheck here, using cancel_work_sync.
-	 * Also locking seems to be fubar here, ring->request_list is protected
+	 * Also locking seems to be fubar here, engine->request_list is protected
 	 * by dev->struct_mutex. */

 	intel_mark_idle(dev);

 	if (mutex_trylock(&dev->struct_mutex)) {
-		struct intel_engine_cs *ring;
-		int i;
-
-		for_each_ring(ring, dev_priv, i)
-			i915_gem_batch_pool_fini(&ring->batch_pool);
+		for_each_engine(engine, dev_priv)
+			i915_gem_batch_pool_fini(&engine->batch_pool);

 		mutex_unlock(&dev->struct_mutex);
 	}
@@ -3048,7 +3040,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 	if (!obj->active)
 		return 0;

-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct drm_i915_gem_request *req;

 		req = obj->last_read_req[i];
@@ -3096,7 +3088,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_wait *args = data;
 	struct drm_i915_gem_object *obj;
-	struct drm_i915_gem_request *req[I915_NUM_RINGS];
+	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
 	unsigned reset_counter;
 	int i, n = 0;
 	int ret;
@@ -3133,7 +3125,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	drm_gem_object_unreference(&obj->base);
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		if (obj->last_read_req[i] == NULL)
 			continue;

@@ -3166,7 +3158,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	struct intel_engine_cs *from;
 	int ret;

-	from = i915_gem_request_get_ring(from_req);
+	from = i915_gem_request_get_engine(from_req);
 	if (to == from)
 		return 0;

@@ -3260,7 +3252,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_request **to_req)
 {
 	const bool readonly = obj->base.pending_write_domain == 0;
-	struct drm_i915_gem_request *req[I915_NUM_RINGS];
+	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
 	int ret, i, n;

 	if (!obj->active)
@@ -3274,7 +3266,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		if (obj->last_write_req)
 			req[n++] = obj->last_write_req;
 	} else {
-		for (i = 0; i < I915_NUM_RINGS; i++)
+		for (i = 0; i < I915_NUM_ENGINES; i++)
 			if (obj->last_read_req[i])
 				req[n++] = obj->last_read_req[i];
 	}
@@ -3391,15 +3383,15 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
 int i915_gpu_idle(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int ret, i;
+	struct intel_engine_cs *engine;
+	int ret;

 	/* Flush everything onto the inactive list. */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		if (!i915.enable_execlists) {
 			struct drm_i915_gem_request *req;

-			req = i915_gem_request_alloc(ring, NULL);
+			req = i915_gem_request_alloc(engine, NULL);
 			if (IS_ERR(req))
 				return PTR_ERR(req);

@@ -3412,7 +3404,7 @@ int i915_gpu_idle(struct drm_device *dev)
 			i915_add_request_no_flush(req);
 		}

-		ret = intel_ring_idle(ring);
+		ret = intel_engine_idle(engine);
 		if (ret)
 			return ret;
 	}
@@ -3513,7 +3505,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
 	end = vm->total;
 	if (flags & PIN_MAPPABLE)
-		end = min_t(u64, end, dev_priv->gtt.mappable_end);
+		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
 	if (flags & PIN_ZONE_4G)
 		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

@@ -3774,7 +3766,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
 		list_move_tail(&vma->vm_link,
-			       &to_i915(obj->base.dev)->gtt.base.inactive_list);
+			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);

 	return 0;
 }
@@ -3949,7 +3941,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		 * cacheline, whereas normally such cachelines would get
 		 * invalidated.
 		 */
-		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
 			return -ENODEV;

 		level = I915_CACHE_LLC;
@@ -4211,7 +4203,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 		     (vma->node.start & (fence_alignment - 1)) == 0);

 	mappable = (vma->node.start + fence_size <=
-		    to_i915(obj->base.dev)->gtt.mappable_end);
+		    to_i915(obj->base.dev)->ggtt.mappable_end);

 	obj->map_and_fenceable = mappable && fenceable;
 }
@@ -4359,15 +4351,15 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	if (obj->active) {
 		int i;

-		for (i = 0; i < I915_NUM_RINGS; i++) {
+		for (i = 0; i < I915_NUM_ENGINES; i++) {
 			struct drm_i915_gem_request *req;

 			req = obj->last_read_req[i];
 			if (req)
-				args->busy |= 1 << (16 + req->ring->exec_id);
+				args->busy |= 1 << (16 + req->engine->exec_id);
 		}
 		if (obj->last_write_req)
-			args->busy |= obj->last_write_req->ring->exec_id;
+			args->busy |= obj->last_write_req->engine->exec_id;
 	}

 unref:
@@ -4447,8 +4439,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	int i;

 	INIT_LIST_HEAD(&obj->global_list);
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		INIT_LIST_HEAD(&obj->ring_list[i]);
+	for (i = 0; i < I915_NUM_ENGINES; i++)
+		INIT_LIST_HEAD(&obj->engine_list[i]);
 	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4653,14 +4645,13 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 }

 static void
-i915_gem_stop_ringbuffers(struct drm_device *dev)
+i915_gem_stop_engines(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;

-	for_each_ring(ring, dev_priv, i)
-		dev_priv->gt.stop_ring(ring);
+	for_each_engine(engine, dev_priv)
+		dev_priv->gt.stop_engine(engine);
 }

 int
@@ -4676,7 +4667,7 @@ i915_gem_suspend(struct drm_device *dev)

 	i915_gem_retire_requests(dev);

-	i915_gem_stop_ringbuffers(dev);
+	i915_gem_stop_engines(dev);
 	mutex_unlock(&dev->struct_mutex);

 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@ -4697,8 +4688,8 @@ err:

 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
+	struct intel_engine_cs *engine = req->engine;
+	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
 	int i, ret;
@@ -4716,12 +4707,12 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 	 * at initialization time.
 	 */
 	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-		intel_ring_emit(ring, remap_info[i]);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+		intel_ring_emit(engine, remap_info[i]);
 	}

-	intel_ring_advance(ring);
+	intel_ring_advance(engine);

 	return ret;
 }
@@ -4778,7 +4769,7 @@ static void init_unused_rings(struct drm_device *dev)
 	}
 }

-int i915_gem_init_rings(struct drm_device *dev)
+int i915_gem_init_engines(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -4814,13 +4805,13 @@ int i915_gem_init_rings(struct drm_device *dev)
 	return 0;

 cleanup_vebox_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
+	intel_cleanup_engine(&dev_priv->engine[VECS]);
 cleanup_blt_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
+	intel_cleanup_engine(&dev_priv->engine[BCS]);
 cleanup_bsd_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
+	intel_cleanup_engine(&dev_priv->engine[VCS]);
 cleanup_render_ring:
-	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+	intel_cleanup_engine(&dev_priv->engine[RCS]);

 	return ret;
 }
@@ -4829,8 +4820,8 @@ int
 i915_gem_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int ret, i, j;
+	struct intel_engine_cs *engine;
+	int ret, j;

 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
@@ -4876,8 +4867,8 @@ i915_gem_init_hw(struct drm_device *dev)
 	}

 	/* Need to do basic initialisation of all rings first: */
-	for_each_ring(ring, dev_priv, i) {
-		ret = ring->init_hw(ring);
+	for_each_engine(engine, dev_priv) {
+		ret = engine->init_hw(engine);
 		if (ret)
 			goto out;
 	}
@@ -4901,34 +4892,36 @@ i915_gem_init_hw(struct drm_device *dev)
 		goto out;

 	/* Now it is safe to go back round and do everything else: */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(engine, dev_priv) {
 		struct drm_i915_gem_request *req;

-		req = i915_gem_request_alloc(ring, NULL);
+		req = i915_gem_request_alloc(engine, NULL);
 		if (IS_ERR(req)) {
 			ret = PTR_ERR(req);
-			i915_gem_cleanup_ringbuffer(dev);
+			i915_gem_cleanup_engines(dev);
 			goto out;
 		}

-		if (ring->id == RCS) {
+		if (engine->id == RCS) {
 			for (j = 0; j < NUM_L3_SLICES(dev); j++)
 				i915_gem_l3_remap(req, j);
 		}

 		ret = i915_ppgtt_init_ring(req);
 		if (ret && ret != -EIO) {
-			DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
+			DRM_ERROR("PPGTT enable %s failed %d\n",
+				  engine->name, ret);
 			i915_gem_request_cancel(req);
-			i915_gem_cleanup_ringbuffer(dev);
+			i915_gem_cleanup_engines(dev);
 			goto out;
 		}

 		ret = i915_gem_context_enable(req);
 		if (ret && ret != -EIO) {
-			DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
+			DRM_ERROR("Context enable %s failed %d\n",
+				  engine->name, ret);
 			i915_gem_request_cancel(req);
-			i915_gem_cleanup_ringbuffer(dev);
+			i915_gem_cleanup_engines(dev);
 			goto out;
 		}

@@ -4952,14 +4945,14 @@ int i915_gem_init(struct drm_device *dev)

 	if (!i915.enable_execlists) {
 		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
-		dev_priv->gt.init_rings = i915_gem_init_rings;
-		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
-		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+		dev_priv->gt.init_engines = i915_gem_init_engines;
+		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
+		dev_priv->gt.stop_engine = intel_stop_engine;
 	} else {
 		dev_priv->gt.execbuf_submit = intel_execlists_submission;
-		dev_priv->gt.init_rings = intel_logical_rings_init;
-		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
-		dev_priv->gt.stop_ring = intel_logical_ring_stop;
+		dev_priv->gt.init_engines = intel_logical_rings_init;
+		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+		dev_priv->gt.stop_engine = intel_logical_ring_stop;
 	}

 	/* This is just a security blanket to placate dragons.
@@ -4980,7 +4973,7 @@ int i915_gem_init(struct drm_device *dev)
 	if (ret)
 		goto out_unlock;

-	ret = dev_priv->gt.init_rings(dev);
+	ret = dev_priv->gt.init_engines(dev);
 	if (ret)
 		goto out_unlock;

@@ -5003,29 +4996,52 @@ out_unlock:
 }

 void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	struct intel_engine_cs *engine;

-	for_each_ring(ring, dev_priv, i)
-		dev_priv->gt.cleanup_ring(ring);
+	for_each_engine(engine, dev_priv)
+		dev_priv->gt.cleanup_engine(engine);

-	if (i915.enable_execlists)
-		/*
-		 * Neither the BIOS, ourselves or any other kernel
-		 * expects the system to be in execlists mode on startup,
-		 * so we need to reset the GPU back to legacy mode.
-		 */
-		intel_gpu_reset(dev);
+	if (i915.enable_execlists)
+		/*
+		 * Neither the BIOS, ourselves or any other kernel
+		 * expects the system to be in execlists mode on startup,
+		 * so we need to reset the GPU back to legacy mode.
+		 */
+		intel_gpu_reset(dev, ALL_ENGINES);
 }

 static void
-init_ring_lists(struct intel_engine_cs *ring)
+init_engine_lists(struct intel_engine_cs *engine)
 {
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&engine->active_list);
+	INIT_LIST_HEAD(&engine->request_list);
 }

+void
+i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
+	    !IS_CHERRYVIEW(dev_priv))
+		dev_priv->num_fence_regs = 32;
+	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
+		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
+		dev_priv->num_fence_regs = 16;
+	else
+		dev_priv->num_fence_regs = 8;
+
+	if (intel_vgpu_active(dev))
+		dev_priv->num_fence_regs =
+				I915_READ(vgtif_reg(avail_rs.fence_num));
+
+	/* Initialize fence registers to zero */
+	i915_gem_restore_fences(dev);
+
+	i915_gem_detect_bit_6_swizzle(dev);
+}
+
 void
@@ -5055,8 +5071,8 @@ i915_gem_load_init(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		init_ring_lists(&dev_priv->ring[i]);
+	for (i = 0; i < I915_NUM_ENGINES; i++)
+		init_engine_lists(&dev_priv->engine[i]);
 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
@@ -5067,17 +5083,6 @@ i915_gem_load_init(struct drm_device *dev)

 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

-	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
-		dev_priv->num_fence_regs = 32;
-	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-		dev_priv->num_fence_regs = 16;
-	else
-		dev_priv->num_fence_regs = 8;
-
-	if (intel_vgpu_active(dev))
-		dev_priv->num_fence_regs =
-				I915_READ(vgtif_reg(avail_rs.fence_num));
-
 	/*
 	 * Set initial sequence number for requests.
 	 * Using this number allows the wraparound to happen early,
@@ -5086,11 +5091,8 @@ i915_gem_load_init(struct drm_device *dev)
 	dev_priv->next_seqno = ((u32)~0 - 0x1100);
 	dev_priv->last_seqno = ((u32)~0 - 0x1101);

-	/* Initialize fence registers to zero */
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	i915_gem_restore_fences(dev);

-	i915_gem_detect_bit_6_swizzle(dev);
 	init_waitqueue_head(&dev_priv->pending_flip_queue);

 	dev_priv->mm.interruptible = true;
@@ -345,12 +345,12 @@ void i915_gem_context_reset(struct drm_device *dev)
 			intel_lr_context_reset(dev, ctx);
 	}

-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct intel_engine_cs *engine = &dev_priv->engine[i];

-		if (ring->last_context) {
-			i915_gem_context_unpin(ring->last_context, ring);
-			ring->last_context = NULL;
+		if (engine->last_context) {
+			i915_gem_context_unpin(engine->last_context, engine);
+			engine->last_context = NULL;
 		}
 	}

@@ -413,7 +413,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 	/* The only known way to stop the gpu from accessing the hw context is
 	 * to reset it. Do this as the very last operation to avoid confusing
 	 * other code, leading to spurious errors. */
-	intel_gpu_reset(dev);
+	intel_gpu_reset(dev, ALL_ENGINES);

 	/* When default context is created and switched to, base object refcount
 	 * will be 2 (+1 from object creation and +1 from do_switch()).
@@ -421,17 +421,17 @@ void i915_gem_context_fini(struct drm_device *dev)
 	 * to default context. So we need to unreference the base object once
 	 * to offset the do_switch part, so that i915_gem_context_unreference()
 	 * can then free the base object correctly. */
-		WARN_ON(!dev_priv->ring[RCS].last_context);
+		WARN_ON(!dev_priv->engine[RCS].last_context);

 		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}

-	for (i = I915_NUM_RINGS; --i >= 0;) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
+	for (i = I915_NUM_ENGINES; --i >= 0;) {
+		struct intel_engine_cs *engine = &dev_priv->engine[i];

-		if (ring->last_context) {
-			i915_gem_context_unpin(ring->last_context, ring);
-			ring->last_context = NULL;
+		if (engine->last_context) {
+			i915_gem_context_unpin(engine->last_context, engine);
+			engine->last_context = NULL;
 		}
 	}

@@ -441,14 +441,14 @@ void i915_gem_context_fini(struct drm_device *dev)

 int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	int ret;

 	if (i915.enable_execlists) {
-		if (ring->init_context == NULL)
+		if (engine->init_context == NULL)
 			return 0;

-		ret = ring->init_context(req);
+		ret = engine->init_context(req);
 	} else
 		ret = i915_switch_context(req);

@@ -510,35 +510,35 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
-		i915_semaphore_is_enabled(ring->dev) ?
-		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+		i915_semaphore_is_enabled(engine->dev) ?
+		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
 		0;
-	int len, i, ret;
+	int len, ret;

 	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
 	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(ring->dev)) {
-		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+	if (IS_GEN6(engine->dev)) {
+		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}

 	/* These flags are for resource streamer on HSW+ */
-	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_INFO(ring->dev)->gen < 8)
+	else if (INTEL_INFO(engine->dev)->gen < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);


 	len = 4;
-	if (INTEL_INFO(ring->dev)->gen >= 7)
+	if (INTEL_INFO(engine->dev)->gen >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 2 : 0);

 	ret = intel_ring_begin(req, len);
@@ -546,54 +546,61 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		return ret;

 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(ring->dev)->gen >= 7) {
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+	if (INTEL_INFO(engine->dev)->gen >= 7) {
+		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;

-			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_ring(signaller, to_i915(ring->dev), i) {
-				if (signaller == ring)
+			intel_ring_emit(engine,
+					MI_LOAD_REGISTER_IMM(num_rings));
+			for_each_engine(signaller, to_i915(engine->dev)) {
+				if (signaller == engine)
 					continue;

-				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				intel_ring_emit_reg(engine,
+						    RING_PSMI_CTL(signaller->mmio_base));
+				intel_ring_emit(engine,
+						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
 		}
 	}

-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(engine, MI_SET_CONTEXT);
+	intel_ring_emit(engine,
+			i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
 			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(engine, MI_NOOP);

-	if (INTEL_INFO(ring->dev)->gen >= 7) {
+	if (INTEL_INFO(engine->dev)->gen >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;

-			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_ring(signaller, to_i915(ring->dev), i) {
-				if (signaller == ring)
+			intel_ring_emit(engine,
+					MI_LOAD_REGISTER_IMM(num_rings));
+			for_each_engine(signaller, to_i915(engine->dev)) {
+				if (signaller == engine)
 					continue;

-				intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				intel_ring_emit_reg(engine,
+						    RING_PSMI_CTL(signaller->mmio_base));
+				intel_ring_emit(engine,
+						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
 		}
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 	}

-	intel_ring_advance(ring);
+	intel_ring_advance(engine);

 	return ret;
 }

-static inline bool should_skip_switch(struct intel_engine_cs *ring,
+static inline bool should_skip_switch(struct intel_engine_cs *engine,
 				      struct intel_context *from,
 				      struct intel_context *to)
 {
@@ -601,42 +608,42 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
 	return false;

 	if (to->ppgtt && from == to &&
-	    !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
 		return true;

 	return false;
 }

 static bool
-needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;

 	if (!to->ppgtt)
 		return false;

-	if (INTEL_INFO(ring->dev)->gen < 8)
+	if (INTEL_INFO(engine->dev)->gen < 8)
 		return true;

-	if (ring != &dev_priv->ring[RCS])
+	if (engine != &dev_priv->engine[RCS])
 		return true;

 	return false;
 }

 static bool
-needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
-		   u32 hw_flags)
+needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
+		   u32 hw_flags)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;

 	if (!to->ppgtt)
 		return false;

-	if (!IS_GEN8(ring->dev))
+	if (!IS_GEN8(engine->dev))
 		return false;

-	if (ring != &dev_priv->ring[RCS])
+	if (engine != &dev_priv->engine[RCS])
 		return false;

 	if (hw_flags & MI_RESTORE_INHIBIT)
@@ -648,25 +655,26 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
 static int do_switch(struct drm_i915_gem_request *req)
 {
 	struct intel_context *to = req->ctx;
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct intel_context *from = ring->last_context;
+	struct intel_engine_cs *engine = req->engine;
+	struct drm_i915_private *dev_priv = req->i915;
+	struct intel_context *from = engine->last_context;
 	u32 hw_flags = 0;
 	bool uninitialized = false;
 	int ret, i;

-	if (from != NULL && ring == &dev_priv->ring[RCS]) {
+	if (from != NULL && engine == &dev_priv->engine[RCS]) {
 		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
 		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
 	}

-	if (should_skip_switch(ring, from, to))
+	if (should_skip_switch(engine, from, to))
 		return 0;

 	/* Trying to pin first makes error handling easier. */
-	if (ring == &dev_priv->ring[RCS]) {
+	if (engine == &dev_priv->engine[RCS]) {
 		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-					    get_context_alignment(ring->dev), 0);
+					    get_context_alignment(engine->dev),
+					    0);
 		if (ret)
 			return ret;
 	}
@@ -676,23 +684,23 @@ static int do_switch(struct drm_i915_gem_request *req)
 	 * evict_everything - as a last ditch gtt defrag effort that also
 	 * switches to the default context. Hence we need to reload from here.
 	 */
-	from = ring->last_context;
+	from = engine->last_context;

-	if (needs_pd_load_pre(ring, to)) {
+	if (needs_pd_load_pre(engine, to)) {
 		/* Older GENs and non render rings still want the load first,
 		 * "PP_DCLV followed by PP_DIR_BASE register through Load
 		 * Register Immediate commands in Ring Buffer before submitting
 		 * a context."*/
-		trace_switch_mm(ring, to);
+		trace_switch_mm(engine, to);
 		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		if (ret)
 			goto unpin_out;

 		/* Doing a PD load always reloads the page dirs */
-		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 	}

-	if (ring != &dev_priv->ring[RCS]) {
+	if (engine != &dev_priv->engine[RCS]) {
 		if (from)
 			i915_gem_context_unreference(from);
 		goto done;
@@ -717,14 +725,14 @@ static int do_switch(struct drm_i915_gem_request *req)
 	 * space. This means we must enforce that a page table load
 	 * occur when this occurs. */
 	} else if (to->ppgtt &&
-		   (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
+		   (intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) {
 		hw_flags |= MI_FORCE_RESTORE;
-		to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 	}

 	/* We should never emit switch_mm more than once */
-	WARN_ON(needs_pd_load_pre(ring, to) &&
-		needs_pd_load_post(ring, to, hw_flags));
+	WARN_ON(needs_pd_load_pre(engine, to) &&
+		needs_pd_load_post(engine, to, hw_flags));

 	ret = mi_set_context(req, hw_flags);
 	if (ret)
@@ -733,8 +741,8 @@ static int do_switch(struct drm_i915_gem_request *req)
 	/* GEN8 does *not* require an explicit reload if the PDPs have been
 	 * setup, and we do not wish to move them.
 	 */
-	if (needs_pd_load_post(ring, to, hw_flags)) {
-		trace_switch_mm(ring, to);
+	if (needs_pd_load_post(engine, to, hw_flags)) {
+		trace_switch_mm(engine, to);
 		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		/* The hardware context switch is emitted, but we haven't
 		 * actually changed the state - so it's probably safe to bail
@@ -787,11 +795,11 @@ static int do_switch(struct drm_i915_gem_request *req)

 done:
 	i915_gem_context_reference(to);
-	ring->last_context = to;
+	engine->last_context = to;

 	if (uninitialized) {
-		if (ring->init_context) {
-			ret = ring->init_context(req);
+		if (engine->init_context) {
+			ret = engine->init_context(req);
 			if (ret)
 				DRM_ERROR("ring init context: %d\n", ret);
 		}
@@ -800,7 +808,7 @@ done:
 	return 0;

 unpin_out:
-	if (ring->id == RCS)
+	if (engine->id == RCS)
 		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
 	return ret;
 }
@@ -820,18 +828,18 @@ unpin_out:
  */
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_engine_cs *engine = req->engine;
+	struct drm_i915_private *dev_priv = req->i915;

 	WARN_ON(i915.enable_execlists);
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

 	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-		if (req->ctx != ring->last_context) {
+		if (req->ctx != engine->last_context) {
 			i915_gem_context_reference(req->ctx);
-			if (ring->last_context)
-				i915_gem_context_unreference(ring->last_context);
-			ring->last_context = req->ctx;
+			if (engine->last_context)
+				i915_gem_context_unreference(engine->last_context);
+			engine->last_context = req->ctx;
 		}
 		return 0;
 	}
@@ -937,7 +945,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 	else if (to_i915(dev)->mm.aliasing_ppgtt)
 		args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
 	else
-		args->value = to_i915(dev)->gtt.base.total;
+		args->value = to_i915(dev)->ggtt.base.total;
 		break;
 	default:
 		ret = -EINVAL;
@@ -36,29 +36,29 @@ i915_verify_lists(struct drm_device *dev)
 	static int warned;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int err = 0;
-	int i;

 	if (warned)
 		return 0;

-	for_each_ring(ring, dev_priv, i) {
-		list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
+	for_each_engine(engine, dev_priv) {
+		list_for_each_entry(obj, &engine->active_list,
+				    engine_list[engine->id]) {
 			if (obj->base.dev != dev ||
 			    !atomic_read(&obj->base.refcount.refcount)) {
 				DRM_ERROR("%s: freed active obj %p\n",
-					  ring->name, obj);
+					  engine->name, obj);
 				err++;
 				break;
 			} else if (!obj->active ||
-				   obj->last_read_req[ring->id] == NULL) {
+				   obj->last_read_req[engine->id] == NULL) {
 				DRM_ERROR("%s: invalid active obj %p\n",
-					  ring->name, obj);
+					  engine->name, obj);
 				err++;
 			} else if (obj->base.write_domain) {
 				DRM_ERROR("%s: invalid write obj %p (w %x)\n",
-					  ring->name,
+					  engine->name,
 					  obj, obj->base.write_domain);
 				err++;
 			}
@@ -330,7 +330,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 	/* Map the page containing the relocation we're going to perform.  */
 	offset = i915_gem_obj_ggtt_offset(obj);
 	offset += reloc->offset;
-	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+	reloc_page = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
 					      offset & PAGE_MASK);
 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

@@ -340,7 +340,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 	if (offset_in_page(offset) == 0) {
 		io_mapping_unmap_atomic(reloc_page);
 		reloc_page =
-			io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+			io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
 						 offset);
 	}

@@ -599,7 +599,7 @@ static bool only_mappable_for_reloc(unsigned int flags)

 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
-				struct intel_engine_cs *ring,
+				struct intel_engine_cs *engine,
 				bool *need_reloc)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
@@ -713,7 +713,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 }

 static int
-i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 			    struct list_head *vmas,
 			    struct intel_context *ctx,
 			    bool *need_relocs)
@@ -723,10 +723,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
 	int retry;

-	i915_gem_retire_requests_ring(ring);
+	i915_gem_retire_requests_ring(engine);

 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

@@ -788,7 +788,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			if (eb_vma_misplaced(vma))
 				ret = i915_vma_unbind(vma);
 			else
-				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+				ret = i915_gem_execbuffer_reserve_vma(vma,
+								      engine,
+								      need_relocs);
 			if (ret)
 				goto err;
 		}
@@ -798,7 +800,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			if (drm_mm_node_allocated(&vma->node))
 				continue;

-			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
+							      need_relocs);
 			if (ret)
 				goto err;
 		}
@@ -821,7 +824,7 @@ static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
-				  struct intel_engine_cs *ring,
+				  struct intel_engine_cs *engine,
 				  struct eb_vmas *eb,
 				  struct drm_i915_gem_exec_object2 *exec,
 				  struct intel_context *ctx)
@@ -910,7 +913,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		goto err;

 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+					  &need_relocs);
 	if (ret)
 		goto err;

@@ -938,7 +942,7 @@ static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(req->ring);
+	const unsigned other_rings = ~intel_engine_flag(req->engine);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -948,7 +952,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		struct drm_i915_gem_object *obj = vma->obj;

 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->ring, &req);
+			ret = i915_gem_object_sync(obj, req->engine, &req);
 			if (ret)
 				return ret;
 		}
@@ -960,7 +964,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	}

 	if (flush_chipset)
-		i915_gem_chipset_flush(req->ring->dev);
+		i915_gem_chipset_flush(req->engine->dev);

 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -1062,12 +1066,12 @@ validate_exec_list(struct drm_device *dev,

 static struct intel_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-			  struct intel_engine_cs *ring, const u32 ctx_id)
+			  struct intel_engine_cs *engine, const u32 ctx_id)
 {
 	struct intel_context *ctx = NULL;
 	struct i915_ctx_hang_stats *hs;

-	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
+	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
 		return ERR_PTR(-EINVAL);

 	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -1080,8 +1084,8 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 		return ERR_PTR(-EIO);
 	}

-	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
-		int ret = intel_lr_context_deferred_alloc(ctx, ring);
+	if (i915.enable_execlists && !ctx->engine[engine->id].state) {
+		int ret = intel_lr_context_deferred_alloc(ctx, engine);
 		if (ret) {
 			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
 			return ERR_PTR(ret);
@@ -1095,7 +1099,7 @@ void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
+	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 	struct i915_vma *vma;

 	list_for_each_entry(vma, vmas, exec_list) {
@@ -1122,7 +1126,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(ring->dev);
+				struct drm_i915_private *dev_priv = to_i915(engine->dev);
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
@@ -1136,7 +1140,7 @@ void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
 	/* Unconditionally force add_request to emit a full flush. */
-	params->ring->gpu_caches_dirty = true;
+	params->engine->gpu_caches_dirty = true;

 	/* Add a breadcrumb for the completion of the batch buffer */
 	__i915_add_request(params->request, params->batch_obj, true);
@@ -1146,11 +1150,11 @@ static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
 			    struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i;

-	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
@@ -1160,18 +1164,18 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 		return ret;

 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit(engine, 0);
 	}

-	intel_ring_advance(ring);
+	intel_ring_advance(engine);

 	return 0;
 }

 static struct drm_i915_gem_object*
-i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
 			  struct eb_vmas *eb,
 			  struct drm_i915_gem_object *batch_obj,
@@ -1183,12 +1187,12 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 	struct i915_vma *vma;
 	int ret;

-	shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
+	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
 						   PAGE_ALIGN(batch_len));
 	if (IS_ERR(shadow_batch_obj))
 		return shadow_batch_obj;

-	ret = i915_parse_cmds(ring,
+	ret = i915_parse_cmds(engine,
 			      batch_obj,
 			      shadow_batch_obj,
 			      batch_start_offset,
@@ -1229,7 +1233,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct list_head *vmas)
 {
 	struct drm_device *dev = params->dev;
-	struct intel_engine_cs *ring = params->ring;
+	struct intel_engine_cs *engine = params->engine;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u64 exec_start, exec_len;
 	int instp_mode;
@@ -1244,8 +1248,8 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (ret)
 		return ret;

-	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
-	     "%s didn't clear reload\n", ring->name);
+	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
+	     "%s didn't clear reload\n", engine->name);

 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	instp_mask = I915_EXEC_CONSTANTS_MASK;
@@ -1253,7 +1257,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
@@ -1280,17 +1284,17 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}

-	if (ring == &dev_priv->ring[RCS] &&
+	if (engine == &dev_priv->engine[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;

-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, INSTPM);
+		intel_ring_emit(engine, instp_mask << 16 | instp_mode);
+		intel_ring_advance(engine);

 		dev_priv->relative_constants_mode = instp_mode;
 	}
@@ -1308,7 +1312,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (exec_len == 0)
 		exec_len = params->batch_obj->base.size;

-	ret = ring->dispatch_execbuffer(params->request,
+	ret = engine->dispatch_execbuffer(params->request,
 					exec_start, exec_len,
 					params->dispatch_flags);
 	if (ret)
@ -1365,7 +1369,7 @@ eb_get_batch(struct eb_vmas *eb)
|
||||
|
||||
#define I915_USER_RINGS (4)
|
||||
|
||||
static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
|
||||
static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
|
||||
[I915_EXEC_DEFAULT] = RCS,
|
||||
[I915_EXEC_RENDER] = RCS,
|
||||
[I915_EXEC_BLT] = BCS,
|
||||
@ -1408,12 +1412,12 @@ eb_select_ring(struct drm_i915_private *dev_priv,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*ring = &dev_priv->ring[_VCS(bsd_idx)];
|
||||
*ring = &dev_priv->engine[_VCS(bsd_idx)];
|
||||
} else {
|
||||
*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
|
||||
*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
|
||||
}
|
||||
|
||||
if (!intel_ring_initialized(*ring)) {
|
||||
if (!intel_engine_initialized(*ring)) {
|
||||
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1432,7 +1436,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
struct eb_vmas *eb;
|
||||
struct drm_i915_gem_object *batch_obj;
|
||||
struct drm_i915_gem_exec_object2 shadow_exec_entry;
|
||||
struct intel_engine_cs *ring;
|
||||
struct intel_engine_cs *engine;
|
||||
struct intel_context *ctx;
|
||||
struct i915_address_space *vm;
|
||||
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
|
||||
@ -1459,7 +1463,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
if (args->flags & I915_EXEC_IS_PINNED)
|
||||
dispatch_flags |= I915_DISPATCH_PINNED;
|
||||
|
||||
ret = eb_select_ring(dev_priv, file, args, &ring);
|
||||
ret = eb_select_ring(dev_priv, file, args, &engine);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -1473,9 +1477,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (ring->id != RCS) {
|
||||
if (engine->id != RCS) {
|
||||
DRM_DEBUG("RS is not available on %s\n",
|
||||
ring->name);
|
||||
engine->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1488,7 +1492,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
if (ret)
|
||||
goto pre_mutex_err;
|
||||
|
||||
ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
|
||||
ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
|
||||
if (IS_ERR(ctx)) {
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
ret = PTR_ERR(ctx);
|
||||
@ -1500,7 +1504,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
if (ctx->ppgtt)
|
||||
vm = &ctx->ppgtt->base;
|
||||
else
|
||||
vm = &dev_priv->gtt.base;
|
||||
vm = &dev_priv->ggtt.base;
|
||||
|
||||
memset(¶ms_master, 0x00, sizeof(params_master));
|
||||
|
||||
@ -1522,7 +1526,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
|
||||
/* Move the objects en-masse into the GTT, evicting if necessary. */
|
||||
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
|
||||
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
|
||||
ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
|
||||
&need_relocs);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@ -1531,7 +1536,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
ret = i915_gem_execbuffer_relocate(eb);
|
||||
if (ret) {
|
||||
if (ret == -EFAULT) {
|
||||
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
|
||||
ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
|
||||
engine,
|
||||
eb, exec, ctx);
|
||||
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
}
|
||||
@ -1547,16 +1553,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
}
|
||||
|
||||
params->args_batch_start_offset = args->batch_start_offset;
|
||||
if (i915_needs_cmd_parser(ring) && args->batch_len) {
|
||||
if (i915_needs_cmd_parser(engine) && args->batch_len) {
|
||||
struct drm_i915_gem_object *parsed_batch_obj;
|
||||
|
||||
parsed_batch_obj = i915_gem_execbuffer_parse(ring,
|
||||
&shadow_exec_entry,
|
||||
eb,
|
||||
batch_obj,
|
||||
args->batch_start_offset,
|
||||
args->batch_len,
|
||||
file->is_master);
|
||||
parsed_batch_obj = i915_gem_execbuffer_parse(engine,
|
||||
&shadow_exec_entry,
|
||||
eb,
|
||||
batch_obj,
|
||||
args->batch_start_offset,
|
||||
args->batch_len,
|
||||
file->is_master);
|
||||
if (IS_ERR(parsed_batch_obj)) {
|
||||
ret = PTR_ERR(parsed_batch_obj);
|
||||
goto err;
|
||||
@ -1608,7 +1614,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
|
||||
|
||||
/* Allocate a request for this batch buffer nice and early. */
|
||||
req = i915_gem_request_alloc(ring, ctx);
|
||||
req = i915_gem_request_alloc(engine, ctx);
|
||||
if (IS_ERR(req)) {
|
||||
ret = PTR_ERR(req);
|
||||
goto err_batch_unpin;
|
||||
@ -1626,7 +1632,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
*/
|
||||
params->dev = dev;
|
||||
params->file = file;
|
||||
params->ring = ring;
|
||||
params->engine = engine;
|
||||
params->dispatch_flags = dispatch_flags;
|
||||
params->batch_obj = batch_obj;
|
||||
params->ctx = ctx;
|
||||
|
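The intel_ring_emit()/intel_ring_advance() calls seen throughout this series write one command dword at a time into the engine's mapped ring buffer and then bump the tail pointer. As a rough sketch of what the emit helper does (simplified from the intel_ringbuffer.h of this period; the real version also relies on space having been reserved by intel_ring_begin() and handles wrap):

#include <linux/io.h>

static inline void example_ring_emit(struct intel_engine_cs *engine,
				     u32 data)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;

	/* Write one command dword at the current tail offset. */
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}

The rename in the hunks above is purely mechanical: the first argument was always the engine, so only the parameter name changes from ring to engine.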
@@ -658,7 +658,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
unsigned entry,
dma_addr_t addr)
{
-struct intel_engine_cs *ring = req->ring;
+struct intel_engine_cs *engine = req->engine;
int ret;

BUG_ON(entry >= 4);
@@ -667,13 +667,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
if (ret)
return ret;

-intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
-intel_ring_emit(ring, upper_32_bits(addr));
-intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
-intel_ring_emit(ring, lower_32_bits(addr));
-intel_ring_advance(ring);
+intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
+intel_ring_emit(engine, upper_32_bits(addr));
+intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
+intel_ring_emit(engine, lower_32_bits(addr));
+intel_ring_advance(engine);

return 0;
}
@@ -1637,7 +1637,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,

/* Make sure write is complete before other code can use this page
* table. Also require for WC mapped PTEs */
-readl(dev_priv->gtt.gsm);
+readl(dev_priv->ggtt.gsm);
}

static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
@@ -1650,11 +1650,11 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
-struct intel_engine_cs *ring = req->ring;
+struct intel_engine_cs *engine = req->engine;
int ret;

/* NB: TLBs must be flushed and invalidated before a switch */
-ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;

@@ -1662,13 +1662,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;

-intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
-intel_ring_emit(ring, PP_DIR_DCLV_2G);
-intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
-intel_ring_emit(ring, get_pd_offset(ppgtt));
-intel_ring_emit(ring, MI_NOOP);
-intel_ring_advance(ring);
+intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+intel_ring_emit(engine, PP_DIR_DCLV_2G);
+intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+intel_ring_emit(engine, get_pd_offset(ppgtt));
+intel_ring_emit(engine, MI_NOOP);
+intel_ring_advance(engine);

return 0;
}
@@ -1676,22 +1676,22 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
-struct intel_engine_cs *ring = req->ring;
+struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);

-I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
-struct intel_engine_cs *ring = req->ring;
+struct intel_engine_cs *engine = req->engine;
int ret;

/* NB: TLBs must be flushed and invalidated before a switch */
-ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;

@@ -1699,17 +1699,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;

-intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
-intel_ring_emit(ring, PP_DIR_DCLV_2G);
-intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
-intel_ring_emit(ring, get_pd_offset(ppgtt));
-intel_ring_emit(ring, MI_NOOP);
-intel_ring_advance(ring);
+intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+intel_ring_emit(engine, PP_DIR_DCLV_2G);
+intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+intel_ring_emit(engine, get_pd_offset(ppgtt));
+intel_ring_emit(engine, MI_NOOP);
+intel_ring_advance(engine);

/* XXX: RCS is the only one to auto invalidate the TLBs? */
-if (ring->id != RCS) {
-ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+if (engine->id != RCS) {
+ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
@@ -1720,15 +1720,15 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
-struct intel_engine_cs *ring = req->ring;
+struct intel_engine_cs *engine = req->engine;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;


-I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));

-POSTING_READ(RING_PP_DIR_DCLV(ring));
+POSTING_READ(RING_PP_DIR_DCLV(engine));

return 0;
}
@@ -1736,12 +1736,11 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
static void gen8_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-struct intel_engine_cs *ring;
-int j;
+struct intel_engine_cs *engine;

-for_each_ring(ring, dev_priv, j) {
+for_each_engine(engine, dev_priv) {
u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
-I915_WRITE(RING_MODE_GEN7(ring),
+I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
@@ -1749,9 +1748,8 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
static void gen7_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-struct intel_engine_cs *ring;
+struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
-int i;

ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
@@ -1765,9 +1763,9 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
I915_WRITE(GAM_ECOCHK, ecochk);

-for_each_ring(ring, dev_priv, i) {
+for_each_engine(engine, dev_priv) {
/* GFX_MODE is per-ring on gen7+ */
-I915_WRITE(RING_MODE_GEN7(ring),
+I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
@@ -1932,7 +1930,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,

/* Make sure write is complete before other code can use this page
* table. Also require for WC mapped PTEs */
-readl(dev_priv->gtt.gsm);
+readl(dev_priv->ggtt.gsm);

mark_tlbs_dirty(ppgtt);
return 0;
@@ -2005,23 +2003,23 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
-BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+BUG_ON(!drm_mm_initialized(&dev_priv->ggtt.base.mm));

ret = gen6_init_scratch(vm);
if (ret)
return ret;

alloc:
-ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
-0, dev_priv->gtt.base.total,
+0, dev_priv->ggtt.base.total,
DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
-ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+ret = i915_gem_evict_something(dev, &dev_priv->ggtt.base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_CACHE_NONE,
-0, dev_priv->gtt.base.total,
+0, dev_priv->ggtt.base.total,
0);
if (ret)
goto err_out;
@@ -2034,7 +2032,7 @@ alloc:
goto err_out;

-if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+if (ppgtt->node.start < dev_priv->ggtt.mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");

return 0;
@@ -2065,7 +2063,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;

-ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+ppgtt->base.pte_encode = dev_priv->ggtt.base.pte_encode;
if (IS_GEN6(dev)) {
ppgtt->switch_mm = gen6_mm_switch;
} else if (IS_HASWELL(dev)) {
@@ -2095,7 +2093,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->pd.base.ggtt_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);

-ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);

gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
@@ -2192,7 +2190,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)

int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
{
-struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+struct drm_i915_private *dev_priv = req->i915;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

if (i915.enable_execlists)
@@ -2265,7 +2263,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
{
bool ret = dev_priv->mm.interruptible;

-if (unlikely(dev_priv->gtt.do_idle_maps)) {
+if (unlikely(dev_priv->ggtt.do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
@@ -2279,22 +2277,21 @@ static bool do_idling(struct drm_i915_private *dev_priv)

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
-if (unlikely(dev_priv->gtt.do_idle_maps))
+if (unlikely(dev_priv->ggtt.do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}

void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-struct intel_engine_cs *ring;
-int i;
+struct intel_engine_cs *engine;

if (INTEL_INFO(dev)->gen < 6)
return;

-for_each_ring(ring, dev_priv, i) {
+for_each_engine(engine, dev_priv) {
u32 fault_reg;
-fault_reg = I915_READ(RING_FAULT_REG(ring));
+fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\n"
@@ -2305,11 +2302,11 @@ void i915_check_and_clear_faults(struct drm_device *dev)
fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
RING_FAULT_SRCID(fault_reg),
RING_FAULT_FAULT_TYPE(fault_reg));
-I915_WRITE(RING_FAULT_REG(ring),
+I915_WRITE(RING_FAULT_REG(engine),
fault_reg & ~RING_FAULT_VALID);
}
}
-POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
}

static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
@@ -2334,9 +2331,9 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)

i915_check_and_clear_faults(dev);

-dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-dev_priv->gtt.base.start,
-dev_priv->gtt.base.total,
+dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
+dev_priv->ggtt.base.start,
+dev_priv->ggtt.base.total,
true);

i915_ggtt_flush(dev_priv);
@@ -2370,7 +2367,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
gen8_pte_t __iomem *gtt_entries =
-(gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+(gen8_pte_t __iomem *)dev_priv->ggtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
@@ -2447,7 +2444,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
gen6_pte_t __iomem *gtt_entries =
-(gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+(gen6_pte_t __iomem *)dev_priv->ggtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
@@ -2491,8 +2488,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen8_pte_t scratch_pte, __iomem *gtt_base =
-(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
-const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+(gen8_pte_t __iomem *) dev_priv->ggtt.gsm + first_entry;
+const int max_entries = gtt_total_entries(dev_priv->ggtt) - first_entry;
int i;
int rpm_atomic_seq;

@@ -2522,8 +2519,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen6_pte_t scratch_pte, __iomem *gtt_base =
-(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
-const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+(gen6_pte_t __iomem *) dev_priv->ggtt.gsm + first_entry;
+const int max_entries = gtt_total_entries(dev_priv->ggtt) - first_entry;
int i;
int rpm_atomic_seq;

@@ -2613,32 +2610,31 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
-struct drm_device *dev = vma->vm->dev;
-struct drm_i915_private *dev_priv = dev->dev_private;
-struct drm_i915_gem_object *obj = vma->obj;
-struct sg_table *pages = obj->pages;
-u32 pte_flags = 0;
+u32 pte_flags;
int ret;

ret = i915_get_ggtt_vma_pages(vma);
if (ret)
return ret;
-pages = vma->ggtt_view.pages;

/* Currently applicable only to VLV */
-if (obj->gt_ro)
+pte_flags = 0;
+if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY;


if (flags & GLOBAL_BIND) {
-vma->vm->insert_entries(vma->vm, pages,
+vma->vm->insert_entries(vma->vm,
+vma->ggtt_view.pages,
vma->node.start,
cache_level, pte_flags);
}

if (flags & LOCAL_BIND) {
-struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
-appgtt->base.insert_entries(&appgtt->base, pages,
+struct i915_hw_ppgtt *appgtt =
+to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+appgtt->base.insert_entries(&appgtt->base,
+vma->ggtt_view.pages,
vma->node.start,
cache_level, pte_flags);
}
@@ -2718,7 +2714,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
* of the aperture.
*/
struct drm_i915_private *dev_priv = dev->dev_private;
-struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
@@ -2801,8 +2797,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
true);

dev_priv->mm.aliasing_ppgtt = ppgtt;
-WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
-dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
+WARN_ON(dev_priv->ggtt.base.bind_vma != ggtt_bind_vma);
+dev_priv->ggtt.base.bind_vma = aliasing_gtt_bind_vma;
}

return 0;
@@ -2813,8 +2809,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u64 gtt_size, mappable_size;

-gtt_size = dev_priv->gtt.base.total;
-mappable_size = dev_priv->gtt.mappable_end;
+gtt_size = dev_priv->ggtt.base.total;
+mappable_size = dev_priv->ggtt.mappable_end;

i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
@@ -2822,7 +2818,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
void i915_global_gtt_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-struct i915_address_space *vm = &dev_priv->gtt.base;
+struct i915_address_space *vm = &dev_priv->ggtt.base;

if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2940,10 +2936,10 @@ static int ggtt_probe_common(struct drm_device *dev,
* readback check when writing GTT PTE entries.
*/
if (IS_BROXTON(dev))
-dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+dev_priv->ggtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
else
-dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
-if (!dev_priv->gtt.gsm) {
+dev_priv->ggtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
+if (!dev_priv->ggtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}
@@ -2952,11 +2948,11 @@ static int ggtt_probe_common(struct drm_device *dev,
if (IS_ERR(scratch_page)) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
-iounmap(dev_priv->gtt.gsm);
+iounmap(dev_priv->ggtt.gsm);
return PTR_ERR(scratch_page);
}

-dev_priv->gtt.base.scratch_page = scratch_page;
+dev_priv->ggtt.base.scratch_page = scratch_page;

return 0;
}
@@ -3034,20 +3030,16 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}

-static int gen8_gmch_probe(struct drm_device *dev,
-u64 *gtt_total,
-size_t *stolen,
-phys_addr_t *mappable_base,
-u64 *mappable_end)
+static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
+struct drm_device *dev = ggtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
-u64 gtt_size;
u16 snb_gmch_ctl;
int ret;

/* TODO: We're not aware of mappable constraints on gen8 yet */
-*mappable_base = pci_resource_start(dev->pdev, 2);
-*mappable_end = pci_resource_len(dev->pdev, 2);
+ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
+ggtt->mappable_end = pci_resource_len(dev->pdev, 2);

if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
@@ -3055,56 +3047,51 @@ static int gen8_gmch_probe(struct drm_device *dev,
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

if (INTEL_INFO(dev)->gen >= 9) {
-*stolen = gen9_get_stolen_size(snb_gmch_ctl);
-gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
+ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
} else if (IS_CHERRYVIEW(dev)) {
-*stolen = chv_get_stolen_size(snb_gmch_ctl);
-gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
+ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
+ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
-*stolen = gen8_get_stolen_size(snb_gmch_ctl);
-gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
+ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
}

-*gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);

-ret = ggtt_probe_common(dev, gtt_size);
-
-dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
-dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
-dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
-dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ret = ggtt_probe_common(dev, ggtt->size);

+ggtt->base.clear_range = gen8_ggtt_clear_range;
if (IS_CHERRYVIEW(dev_priv))
-dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
+ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
+else
+ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ggtt->base.bind_vma = ggtt_bind_vma;
+ggtt->base.unbind_vma = ggtt_unbind_vma;

return ret;
}

-static int gen6_gmch_probe(struct drm_device *dev,
-u64 *gtt_total,
-size_t *stolen,
-phys_addr_t *mappable_base,
-u64 *mappable_end)
+static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
-unsigned int gtt_size;
+struct drm_device *dev = ggtt->base.dev;
u16 snb_gmch_ctl;
int ret;

-*mappable_base = pci_resource_start(dev->pdev, 2);
-*mappable_end = pci_resource_len(dev->pdev, 2);
+ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
+ggtt->mappable_end = pci_resource_len(dev->pdev, 2);

/* 64/512MB is the current min/max we actually know of, but this is just
* a coarse sanity check.
*/
-if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
-DRM_ERROR("Unknown GMADR size (%llx)\n",
-dev_priv->gtt.mappable_end);
+if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) {
+DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
return -ENXIO;
}

@@ -3112,36 +3099,31 @@ static int gen6_gmch_probe(struct drm_device *dev,
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

-*stolen = gen6_get_stolen_size(snb_gmch_ctl);
+ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;

-gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
-*gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+ret = ggtt_probe_common(dev, ggtt->size);

-ret = ggtt_probe_common(dev, gtt_size);
-
-dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
-dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
-dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
-dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ggtt->base.clear_range = gen6_ggtt_clear_range;
+ggtt->base.insert_entries = gen6_ggtt_insert_entries;
+ggtt->base.bind_vma = ggtt_bind_vma;
+ggtt->base.unbind_vma = ggtt_unbind_vma;

return ret;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
+struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);

-struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
-
-iounmap(gtt->gsm);
+iounmap(ggtt->gsm);
free_scratch_page(vm->dev, vm->scratch_page);
}

-static int i915_gmch_probe(struct drm_device *dev,
-u64 *gtt_total,
-size_t *stolen,
-phys_addr_t *mappable_base,
-u64 *mappable_end)
+static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
+struct drm_device *dev = ggtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;

@@ -3151,15 +3133,16 @@ static int i915_gmch_probe(struct drm_device *dev,
return -EIO;
}

-intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
+intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
+&ggtt->mappable_base, &ggtt->mappable_end);

-dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
-dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
-dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
-dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
-dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
+ggtt->base.insert_entries = i915_ggtt_insert_entries;
+ggtt->base.clear_range = i915_ggtt_clear_range;
+ggtt->base.bind_vma = ggtt_bind_vma;
+ggtt->base.unbind_vma = ggtt_unbind_vma;

-if (unlikely(dev_priv->gtt.do_idle_maps))
+if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");

return 0;
@@ -3173,38 +3156,45 @@ static void i915_gmch_remove(struct i915_address_space *vm)
int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-struct i915_gtt *gtt = &dev_priv->gtt;
+struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;

if (INTEL_INFO(dev)->gen <= 5) {
-gtt->gtt_probe = i915_gmch_probe;
-gtt->base.cleanup = i915_gmch_remove;
+ggtt->probe = i915_gmch_probe;
+ggtt->base.cleanup = i915_gmch_remove;
} else if (INTEL_INFO(dev)->gen < 8) {
-gtt->gtt_probe = gen6_gmch_probe;
-gtt->base.cleanup = gen6_gmch_remove;
+ggtt->probe = gen6_gmch_probe;
+ggtt->base.cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
-gtt->base.pte_encode = iris_pte_encode;
+ggtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev))
-gtt->base.pte_encode = hsw_pte_encode;
+ggtt->base.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev))
-gtt->base.pte_encode = byt_pte_encode;
+ggtt->base.pte_encode = byt_pte_encode;
else if (INTEL_INFO(dev)->gen >= 7)
-gtt->base.pte_encode = ivb_pte_encode;
+ggtt->base.pte_encode = ivb_pte_encode;
else
-gtt->base.pte_encode = snb_pte_encode;
+ggtt->base.pte_encode = snb_pte_encode;
} else {
-dev_priv->gtt.gtt_probe = gen8_gmch_probe;
-dev_priv->gtt.base.cleanup = gen6_gmch_remove;
+ggtt->probe = gen8_gmch_probe;
+ggtt->base.cleanup = gen6_gmch_remove;
}

-gtt->base.dev = dev;
-gtt->base.is_ggtt = true;
+ggtt->base.dev = dev;
+ggtt->base.is_ggtt = true;

-ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
-&gtt->mappable_base, &gtt->mappable_end);
+ret = ggtt->probe(ggtt);
if (ret)
return ret;

+if ((ggtt->base.total - 1) >> 32) {
+DRM_ERROR("We never expected a Global GTT with more than 32bits"
+"of address space! Found %lldM!\n",
+ggtt->base.total >> 20);
+ggtt->base.total = 1ULL << 32;
+ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+}
+
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
@@ -3215,9 +3205,9 @@ int i915_gem_gtt_init(struct drm_device *dev)

/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %lluM\n",
-gtt->base.total >> 20);
-DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
-DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+ggtt->base.total >> 20);
+DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
+DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n");
@@ -3234,7 +3224,7 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;

out_gtt_cleanup:
-gtt->base.cleanup(&dev_priv->gtt.base);
+ggtt->base.cleanup(&dev_priv->ggtt.base);

return ret;
}
@@ -3250,13 +3240,13 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev);

/* First fill our portion of the GTT with scratch pages */
-dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-dev_priv->gtt.base.start,
-dev_priv->gtt.base.total,
+dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
+dev_priv->ggtt.base.start,
+dev_priv->ggtt.base.total,
true);

/* Cache flush objects bound into GGTT and rebind them. */
-vm = &dev_priv->gtt.base;
+vm = &dev_priv->ggtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
@@ -3377,11 +3367,6 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int column, row;
unsigned int src_idx;

-if (!sg) {
-st->nents = 0;
-sg = st->sgl;
-}
-
for (column = 0; column < width; column++) {
src_idx = stride * (height - 1) + column;
for (row = 0; row < height; row++) {
@@ -3405,7 +3390,7 @@ static struct sg_table *
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
-unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
+unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
unsigned int size_pages_uv;
struct sg_page_iter sg_iter;
unsigned long i;
@@ -3423,7 +3408,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,

/* Account for UV plane with NV12. */
if (rot_info->pixel_format == DRM_FORMAT_NV12)
-size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
+size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height;
else
size_pages_uv = 0;

@@ -3443,11 +3428,14 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
i++;
}

+st->nents = 0;
+sg = st->sgl;
+
/* Rotate the pages. */
sg = rotate_pages(page_addr_list, 0,
-rot_info->width_pages, rot_info->height_pages,
-rot_info->width_pages,
-st, NULL);
+rot_info->plane[0].width, rot_info->plane[0].height,
+rot_info->plane[0].width,
+st, sg);

/* Append the UV plane if NV12. */
if (rot_info->pixel_format == DRM_FORMAT_NV12) {
@@ -3459,18 +3447,15 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,

rot_info->uv_start_page = uv_start_page;

-rotate_pages(page_addr_list, uv_start_page,
-rot_info->width_pages_uv,
-rot_info->height_pages_uv,
-rot_info->width_pages_uv,
-st, sg);
+sg = rotate_pages(page_addr_list, rot_info->uv_start_page,
+rot_info->plane[1].width, rot_info->plane[1].height,
+rot_info->plane[1].width,
+st, sg);
}

-DRM_DEBUG_KMS(
-"Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
-obj->base.size, rot_info->pitch, rot_info->height,
-rot_info->pixel_format, rot_info->width_pages,
-rot_info->height_pages, size_pages + size_pages_uv,
+DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n",
+obj->base.size, rot_info->plane[0].width,
+rot_info->plane[0].height, size_pages + size_pages_uv,
size_pages);

drm_free_large(page_addr_list);
@@ -3482,11 +3467,9 @@ err_sg_alloc:
err_st_alloc:
drm_free_large(page_addr_list);

-DRM_DEBUG_KMS(
-"Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
-obj->base.size, ret, rot_info->pitch, rot_info->height,
-rot_info->pixel_format, rot_info->width_pages,
-rot_info->height_pages, size_pages + size_pages_uv,
+DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n",
+obj->base.size, ret, rot_info->plane[0].width,
+rot_info->plane[0].height, size_pages + size_pages_uv,
size_pages);
return ERR_PTR(ret);
}
@@ -3634,7 +3617,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
-return view->params.rotated.size;
+return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
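One behavioural addition buried in the i915_gem_gtt.c changes above is the 4GiB sanity clamp applied after probing. The (total - 1) >> 32 test is non-zero exactly when total exceeds 1ULL << 32, so a GGTT of exactly 4GiB still passes. A standalone sketch of the arithmetic (illustrative only; clamp_ggtt_total() is not a real driver function):

#include <assert.h>
#include <stdint.h>

static uint64_t clamp_ggtt_total(uint64_t total)
{
	/* Non-zero iff total > 4GiB; exactly 4GiB yields 0. */
	if ((total - 1) >> 32)
		total = 1ULL << 32;
	return total;
}

int main(void)
{
	assert(clamp_ggtt_total(1ULL << 32) == 1ULL << 32);
	assert(clamp_ggtt_total((1ULL << 32) + 4096) == 1ULL << 32);
	return 0;
}

The mappable end is then clamped to the (possibly reduced) total, so the CPU-mappable aperture can never extend past the GGTT itself.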
@@ -135,16 +135,13 @@ enum i915_ggtt_view_type {
};

struct intel_rotation_info {
-unsigned int height;
-unsigned int pitch;
unsigned int uv_offset;
uint32_t pixel_format;
uint64_t fb_modifier;
-unsigned int width_pages, height_pages;
-uint64_t size;
-unsigned int width_pages_uv, height_pages_uv;
-uint64_t size_uv;
unsigned int uv_start_page;
+struct {
+/* tiles */
+unsigned int width, height;
+} plane[2];
};

struct i915_ggtt_view {
@@ -342,13 +339,14 @@ struct i915_address_space {
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
-struct i915_gtt {
+struct i915_ggtt {
struct i915_address_space base;

size_t stolen_size; /* Total size of stolen memory */
size_t stolen_usable_size; /* Total size minus BIOS reserved */
size_t stolen_reserved_base;
size_t stolen_reserved_size;
+size_t size; /* Total size of Global GTT */
u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
@@ -360,10 +358,7 @@ struct i915_gtt {

int mtrr;

-/* global gtt ops */
-int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
-size_t *stolen, phys_addr_t *mappable_base,
-u64 *mappable_end);
+int (*probe)(struct i915_ggtt *ggtt);
};

struct i915_hw_ppgtt {
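With the intel_rotation_info rework above, per-plane width/height (in tiles) replace the old precomputed size fields, and the rotated view size is derived on demand. A sketch of what a helper like intel_rotation_info_size() is presumably doing with the new fields (a hypothetical reimplementation for illustration, not copied from the driver):

static unsigned int
example_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	/* Each plane contributes width * height tile-sized pages. */
	for (i = 0; i < 2; i++)
		size += rot_info->plane[i].width * rot_info->plane[i].height;

	return size; /* in pages; shift left by PAGE_SHIFT for bytes */
}

This matches the call site in i915_ggtt_view_size(), which shifts the returned page count by PAGE_SHIFT to get a size in bytes.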
@@ -169,15 +169,15 @@ void i915_gem_render_state_fini(struct render_state *so)
drm_gem_object_unreference(&so->obj->base);
}

-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
struct render_state *so)
{
int ret;

-if (WARN_ON(ring->id != RCS))
+if (WARN_ON(engine->id != RCS))
return -ENOENT;

-ret = render_state_init(so, ring->dev);
+ret = render_state_init(so, engine->dev);
if (ret)
return ret;

@@ -198,21 +198,21 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
struct render_state so;
int ret;

-ret = i915_gem_render_state_prepare(req->ring, &so);
+ret = i915_gem_render_state_prepare(req->engine, &so);
if (ret)
return ret;

if (so.rodata == NULL)
return 0;

-ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;

if (so.aux_batch_size > 8) {
-ret = req->ring->dispatch_execbuffer(req,
+ret = req->engine->dispatch_execbuffer(req,
(so.ggtt_offset +
so.aux_batch_offset),
so.aux_batch_size,
@@ -43,7 +43,7 @@ struct render_state {

int i915_gem_render_state_init(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
struct render_state *so);

#endif /* _I915_GEM_RENDER_STATE_H_ */
@@ -74,7 +74,7 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
{
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
alignment, 0,
-dev_priv->gtt.stolen_usable_size);
+dev_priv->ggtt.stolen_usable_size);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
@@ -134,7 +134,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I85X_DRB3, &tmp);
tom = tmp * MB(32);

-base = tom - tseg_size - dev_priv->gtt.stolen_size;
+base = tom - tseg_size - dev_priv->ggtt.stolen_size;
} else if (IS_845G(dev)) {
u32 tseg_size = 0;
u32 tom;
@@ -158,7 +158,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp);
tom = tmp * MB(32);

-base = tom - tseg_size - dev_priv->gtt.stolen_size;
+base = tom - tseg_size - dev_priv->ggtt.stolen_size;
} else if (IS_I830(dev)) {
u32 tseg_size = 0;
u32 tom;
@@ -178,7 +178,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp);
tom = tmp * MB(32);

-base = tom - tseg_size - dev_priv->gtt.stolen_size;
+base = tom - tseg_size - dev_priv->ggtt.stolen_size;
}

if (base == 0)
@@ -189,8 +189,8 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
struct {
u32 start, end;
} stolen[2] = {
-{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
-{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
+{ .start = base, .end = base + dev_priv->ggtt.stolen_size, },
+{ .start = base, .end = base + dev_priv->ggtt.stolen_size, },
};
u64 gtt_start, gtt_end;

@@ -200,7 +200,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
gtt_start &= PGTBL_ADDRESS_LO_MASK;
-gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+gtt_end = gtt_start + gtt_total_entries(dev_priv->ggtt) * 4;

if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
stolen[0].end = gtt_start;
@@ -211,10 +211,10 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (stolen[0].end - stolen[0].start >
stolen[1].end - stolen[1].start) {
base = stolen[0].start;
-dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+dev_priv->ggtt.stolen_size = stolen[0].end - stolen[0].start;
} else {
base = stolen[1].start;
-dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+dev_priv->ggtt.stolen_size = stolen[1].end - stolen[1].start;
}

if (stolen[0].start != stolen[1].start ||
@@ -223,7 +223,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
(unsigned long long) gtt_start,
(unsigned long long) gtt_end - 1);
DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
-base, base + (u32) dev_priv->gtt.stolen_size - 1);
+base, base + (u32) dev_priv->ggtt.stolen_size - 1);
}
}

@@ -233,7 +233,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
-r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+r = devm_request_mem_region(dev->dev, base, dev_priv->ggtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
@@ -245,7 +245,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* reservation starting from 1 instead of 0.
*/
r = devm_request_mem_region(dev->dev, base + 1,
-dev_priv->gtt.stolen_size - 1,
+dev_priv->ggtt.stolen_size - 1,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
@@ -253,7 +253,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*/
if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
-base, base + (uint32_t)dev_priv->gtt.stolen_size);
+base, base + (uint32_t)dev_priv->ggtt.stolen_size);
base = 0;
}
}
@@ -278,7 +278,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
unsigned long stolen_top = dev_priv->mm.stolen_base +
-dev_priv->gtt.stolen_size;
+dev_priv->ggtt.stolen_size;

*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

@@ -372,7 +372,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
unsigned long stolen_top;

-stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;

*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

@@ -401,14 +401,14 @@ int i915_gem_init_stolen(struct drm_device *dev)
}
#endif

-if (dev_priv->gtt.stolen_size == 0)
+if (dev_priv->ggtt.stolen_size == 0)
return 0;

dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;

-stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;

switch (INTEL_INFO(dev_priv)->gen) {
case 2:
@@ -458,18 +458,18 @@ int i915_gem_init_stolen(struct drm_device *dev)
return 0;
}

-dev_priv->gtt.stolen_reserved_base = reserved_base;
-dev_priv->gtt.stolen_reserved_size = reserved_size;
+dev_priv->ggtt.stolen_reserved_base = reserved_base;
+dev_priv->ggtt.stolen_reserved_size = reserved_size;

/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;

DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
-dev_priv->gtt.stolen_size >> 10,
-(dev_priv->gtt.stolen_size - reserved_total) >> 10);
+dev_priv->ggtt.stolen_size >> 10,
+(dev_priv->ggtt.stolen_size - reserved_total) >> 10);

-dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
+dev_priv->ggtt.stolen_usable_size = dev_priv->ggtt.stolen_size -
reserved_total;

/*
@@ -483,7 +483,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
* i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
* problem later.
*/
-drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
+drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->ggtt.stolen_usable_size);

return 0;
}
@@ -497,7 +497,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
struct scatterlist *sg;

DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
-BUG_ON(offset > dev_priv->gtt.stolen_size - size);
+BUG_ON(offset > dev_priv->ggtt.stolen_size - size);

/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -629,7 +629,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-struct i915_address_space *ggtt = &dev_priv->gtt.base;
+struct i915_address_space *ggtt = &dev_priv->ggtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
@@ -758,6 +758,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
int ret;
u32 handle;

+if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
+/* We cannot support coherent userptr objects on hw without
+* LLC and broken snooping.
+*/
+return -ENODEV;
+}
+
if (args->flags & ~(I915_USERPTR_READ_ONLY |
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
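The new guard at the top of i915_gem_userptr_ioctl() rejects userptr buffers up front on hardware that has neither an LLC nor working snoop-based coherency, since the kernel could not otherwise keep the CPU and GPU views of the pages coherent. From userspace the failure surfaces as ENODEV; a minimal libdrm-style caller sketch (the error handling is illustrative, not taken from a real client):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Wrap a page-aligned buffer as a GEM object; returns 0 on failure. */
static uint32_t try_userptr(int fd, void *ptr, uint64_t size)
{
	struct drm_i915_gem_userptr arg = {
		.user_ptr = (uintptr_t)ptr,
		.user_size = size,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg)) {
		if (errno == ENODEV)
			fprintf(stderr, "coherent userptr unsupported on this hw\n");
		return 0;
	}
	return arg.handle;
}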
@@ -198,7 +198,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
err->size,
err->read_domains,
err->write_domain);
-for (i = 0; i < I915_NUM_RINGS; i++)
+for (i = 0; i < I915_NUM_ENGINES; i++)
err_printf(m, "%02x ", err->rseqno[i]);

err_printf(m, "] %02x", err->wseqno);
@@ -230,8 +230,6 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
return "wait";
case HANGCHECK_ACTIVE:
return "active";
-case HANGCHECK_ACTIVE_LOOP:
-return "active (loop)";
case HANGCHECK_KICK:
return "kick";
case HANGCHECK_HUNG:
@@ -433,7 +431,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
obj = error->ring[i].batchbuffer;
if (obj) {
-err_puts(m, dev_priv->ring[i].name);
+err_puts(m, dev_priv->engine[i].name);
if (error->ring[i].pid != -1)
err_printf(m, " (submitted by %s [%d])",
error->ring[i].comm,
@@ -447,14 +445,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = error->ring[i].wa_batchbuffer;
if (obj) {
err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
-dev_priv->ring[i].name,
+dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}

if (error->ring[i].num_requests) {
err_printf(m, "%s --- %d requests\n",
-dev_priv->ring[i].name,
+dev_priv->engine[i].name,
error->ring[i].num_requests);
for (j = 0; j < error->ring[i].num_requests; j++) {
err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
@@ -466,7 +464,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,

if ((obj = error->ring[i].ringbuffer)) {
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
-dev_priv->ring[i].name,
+dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
@@ -480,7 +478,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
hws_page = &obj->pages[LRC_PPHWSP_PN][0];
}
err_printf(m, "%s --- HW Status = 0x%08llx\n",
-dev_priv->ring[i].name, hws_offset);
+dev_priv->engine[i].name, hws_offset);
offset = 0;
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
@@ -493,9 +491,31 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}

+obj = error->ring[i].wa_ctx;
+if (obj) {
+u64 wa_ctx_offset = obj->gtt_offset;
+u32 *wa_ctx_page = &obj->pages[0][0];
+struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
+engine->wa_ctx.per_ctx.size);
+
+err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
+dev_priv->engine[i].name, wa_ctx_offset);
+offset = 0;
+for (elt = 0; elt < wa_ctx_size; elt += 4) {
+err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+offset,
+wa_ctx_page[elt + 0],
+wa_ctx_page[elt + 1],
+wa_ctx_page[elt + 2],
+wa_ctx_page[elt + 3]);
+offset += 16;
+}
+}
+
if ((obj = error->ring[i].ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
-dev_priv->ring[i].name,
+dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
@@ -585,6 +605,7 @@ static void i915_error_state_free(struct kref *error_ref)
i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
kfree(error->ring[i].requests);
+i915_error_object_free(error->ring[i].wa_ctx);
}

i915_error_object_free(error->semaphore_obj);
@@ -632,7 +653,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
vma && (vma->bound & GLOBAL_BIND) &&
-reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+reloc_offset + num_pages * PAGE_SIZE <= dev_priv->ggtt.mappable_end);

/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
@@ -642,7 +663,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;

reloc_offset = i915_gem_obj_ggtt_offset(src);
-if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
+if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->ggtt.mappable_end)
goto unwind;
}

@@ -668,7 +689,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read.
*/

-s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+s = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
@@ -701,7 +722,7 @@ unwind:
return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
-i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
+i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)

static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
@@ -711,7 +732,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,

err->size = obj->base.size;
err->name = obj->base.name;
-for (i = 0; i < I915_NUM_RINGS; i++)
+for (i = 0; i < I915_NUM_ENGINES; i++)
err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
err->gtt_offset = vma->node.start;
@@ -726,7 +747,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->ring = obj->last_write_req ?
-i915_gem_request_get_ring(obj->last_write_req)->id : -1;
+i915_gem_request_get_engine(obj->last_write_req)->id : -1;
err->cache_level = obj->cache_level;
}

@@ -788,7 +809,7 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
* synchronization commands which almost always appear in the case
* strictly a client bug. Use instdone to differentiate those some.
*/
-for (i = 0; i < I915_NUM_RINGS; i++) {
+for (i = 0; i < I915_NUM_ENGINES; i++) {
if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
if (ring_id)
*ring_id = i;
@@ -821,11 +842,11 @@ static void i915_gem_record_fences(struct drm_device *dev,

static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
-struct intel_engine_cs *ring,
+struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering)
{
struct intel_engine_cs *to;
-int i;
+enum intel_engine_id id;

if (!i915_semaphore_is_enabled(dev_priv->dev))
return;
@@ -835,68 +856,69 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
i915_error_ggtt_object_create(dev_priv,
dev_priv->semaphore_obj);

-for_each_ring(to, dev_priv, i) {
+for_each_engine_id(to, dev_priv, id) {
int idx;
u16 signal_offset;
u32 *tmp;

-if (ring == to)
+if (engine == to)
continue;

-signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
|
||||
signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
|
||||
/ 4;
|
||||
tmp = error->semaphore_obj->pages[0];
|
||||
idx = intel_ring_sync_index(ring, to);
|
||||
idx = intel_ring_sync_index(engine, to);
|
||||
|
||||
ering->semaphore_mboxes[idx] = tmp[signal_offset];
|
||||
ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
|
||||
ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
|
||||
}
|
||||
}
|
||||
|
||||
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
|
||||
struct intel_engine_cs *ring,
|
||||
struct intel_engine_cs *engine,
|
||||
struct drm_i915_error_ring *ering)
|
||||
{
|
||||
ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
|
||||
ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
|
||||
ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
|
||||
ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
|
||||
ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
|
||||
ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
|
||||
ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
|
||||
ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
|
||||
|
||||
if (HAS_VEBOX(dev_priv->dev)) {
|
||||
ering->semaphore_mboxes[2] =
|
||||
I915_READ(RING_SYNC_2(ring->mmio_base));
|
||||
ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
|
||||
I915_READ(RING_SYNC_2(engine->mmio_base));
|
||||
ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
|
||||
}
|
||||
}
|
||||
|
||||
static void i915_record_ring_state(struct drm_device *dev,
|
||||
struct drm_i915_error_state *error,
|
||||
struct intel_engine_cs *ring,
|
||||
struct intel_engine_cs *engine,
|
||||
struct drm_i915_error_ring *ering)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
|
||||
ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
|
||||
ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
|
||||
ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
|
||||
if (INTEL_INFO(dev)->gen >= 8)
|
||||
gen8_record_semaphore_state(dev_priv, error, ring, ering);
|
||||
gen8_record_semaphore_state(dev_priv, error, engine,
|
||||
ering);
|
||||
else
|
||||
gen6_record_semaphore_state(dev_priv, ring, ering);
|
||||
gen6_record_semaphore_state(dev_priv, engine, ering);
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 4) {
|
||||
ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
|
||||
ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
|
||||
ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
|
||||
ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
|
||||
ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
|
||||
ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
|
||||
ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
|
||||
ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
|
||||
ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
|
||||
ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
|
||||
ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
|
||||
ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
|
||||
if (INTEL_INFO(dev)->gen >= 8) {
|
||||
ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
|
||||
ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
|
||||
ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
|
||||
ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
|
||||
}
|
||||
ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
|
||||
ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
|
||||
} else {
|
||||
ering->faddr = I915_READ(DMA_FADD_I8XX);
|
||||
ering->ipeir = I915_READ(IPEIR);
|
||||
@ -904,20 +926,20 @@ static void i915_record_ring_state(struct drm_device *dev,
|
||||
ering->instdone = I915_READ(GEN2_INSTDONE);
|
||||
}
|
||||
|
||||
ering->waiting = waitqueue_active(&ring->irq_queue);
|
||||
ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
|
||||
ering->seqno = ring->get_seqno(ring, false);
|
||||
ering->acthd = intel_ring_get_active_head(ring);
|
||||
ering->start = I915_READ_START(ring);
|
||||
ering->head = I915_READ_HEAD(ring);
|
||||
ering->tail = I915_READ_TAIL(ring);
|
||||
ering->ctl = I915_READ_CTL(ring);
|
||||
ering->waiting = waitqueue_active(&engine->irq_queue);
|
||||
ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
|
||||
ering->seqno = engine->get_seqno(engine, false);
|
||||
ering->acthd = intel_ring_get_active_head(engine);
|
||||
ering->start = I915_READ_START(engine);
|
||||
ering->head = I915_READ_HEAD(engine);
|
||||
ering->tail = I915_READ_TAIL(engine);
|
||||
ering->ctl = I915_READ_CTL(engine);
|
||||
|
||||
if (I915_NEED_GFX_HWS(dev)) {
|
||||
i915_reg_t mmio;
|
||||
|
||||
if (IS_GEN7(dev)) {
|
||||
switch (ring->id) {
|
||||
switch (engine->id) {
|
||||
default:
|
||||
case RCS:
|
||||
mmio = RENDER_HWS_PGA_GEN7;
|
||||
@ -932,51 +954,51 @@ static void i915_record_ring_state(struct drm_device *dev,
|
||||
mmio = VEBOX_HWS_PGA_GEN7;
|
||||
break;
|
||||
}
|
||||
} else if (IS_GEN6(ring->dev)) {
|
||||
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
|
||||
} else if (IS_GEN6(engine->dev)) {
|
||||
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
|
||||
} else {
|
||||
/* XXX: gen8 returns to sanity */
|
||||
mmio = RING_HWS_PGA(ring->mmio_base);
|
||||
mmio = RING_HWS_PGA(engine->mmio_base);
|
||||
}
|
||||
|
||||
ering->hws = I915_READ(mmio);
|
||||
}
|
||||
|
||||
ering->hangcheck_score = ring->hangcheck.score;
|
||||
ering->hangcheck_action = ring->hangcheck.action;
|
||||
ering->hangcheck_score = engine->hangcheck.score;
|
||||
ering->hangcheck_action = engine->hangcheck.action;
|
||||
|
||||
if (USES_PPGTT(dev)) {
|
||||
int i;
|
||||
|
||||
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
|
||||
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
|
||||
|
||||
if (IS_GEN6(dev))
|
||||
ering->vm_info.pp_dir_base =
|
||||
I915_READ(RING_PP_DIR_BASE_READ(ring));
|
||||
I915_READ(RING_PP_DIR_BASE_READ(engine));
|
||||
else if (IS_GEN7(dev))
|
||||
ering->vm_info.pp_dir_base =
|
||||
I915_READ(RING_PP_DIR_BASE(ring));
|
||||
I915_READ(RING_PP_DIR_BASE(engine));
|
||||
else if (INTEL_INFO(dev)->gen >= 8)
|
||||
for (i = 0; i < 4; i++) {
|
||||
ering->vm_info.pdp[i] =
|
||||
I915_READ(GEN8_RING_PDP_UDW(ring, i));
|
||||
I915_READ(GEN8_RING_PDP_UDW(engine, i));
|
||||
ering->vm_info.pdp[i] <<= 32;
|
||||
ering->vm_info.pdp[i] |=
|
||||
I915_READ(GEN8_RING_PDP_LDW(ring, i));
|
||||
I915_READ(GEN8_RING_PDP_LDW(engine, i));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void i915_gem_record_active_context(struct intel_engine_cs *ring,
|
||||
static void i915_gem_record_active_context(struct intel_engine_cs *engine,
|
||||
struct drm_i915_error_state *error,
|
||||
struct drm_i915_error_ring *ering)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = ring->dev->dev_private;
|
||||
struct drm_i915_private *dev_priv = engine->dev->dev_private;
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
/* Currently render ring is the only HW context user */
|
||||
if (ring->id != RCS || !error->ccid)
|
||||
if (engine->id != RCS || !error->ccid)
|
||||
return;
|
||||
|
||||
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
|
||||
@ -997,26 +1019,26 @@ static void i915_gem_record_rings(struct drm_device *dev,
|
||||
struct drm_i915_gem_request *request;
|
||||
int i, count;
|
||||
|
||||
for (i = 0; i < I915_NUM_RINGS; i++) {
|
||||
struct intel_engine_cs *ring = &dev_priv->ring[i];
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[i];
|
||||
struct intel_ringbuffer *rbuf;
|
||||
|
||||
error->ring[i].pid = -1;
|
||||
|
||||
if (ring->dev == NULL)
|
||||
if (engine->dev == NULL)
|
||||
continue;
|
||||
|
||||
error->ring[i].valid = true;
|
||||
|
||||
i915_record_ring_state(dev, error, ring, &error->ring[i]);
|
||||
i915_record_ring_state(dev, error, engine, &error->ring[i]);
|
||||
|
||||
request = i915_gem_find_active_request(ring);
|
||||
request = i915_gem_find_active_request(engine);
|
||||
if (request) {
|
||||
struct i915_address_space *vm;
|
||||
|
||||
vm = request->ctx && request->ctx->ppgtt ?
|
||||
&request->ctx->ppgtt->base :
|
||||
&dev_priv->gtt.base;
|
||||
&dev_priv->ggtt.base;
|
||||
|
||||
/* We need to copy these to an anonymous buffer
|
||||
* as the simplest method to avoid being overwritten
|
||||
@ -1030,7 +1052,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
|
||||
if (HAS_BROKEN_CS_TLB(dev_priv->dev))
|
||||
error->ring[i].wa_batchbuffer =
|
||||
i915_error_ggtt_object_create(dev_priv,
|
||||
ring->scratch.obj);
|
||||
engine->scratch.obj);
|
||||
|
||||
if (request->pid) {
|
||||
struct task_struct *task;
|
||||
@ -1052,11 +1074,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
|
||||
* executed).
|
||||
*/
|
||||
if (request)
|
||||
rbuf = request->ctx->engine[ring->id].ringbuf;
|
||||
rbuf = request->ctx->engine[engine->id].ringbuf;
|
||||
else
|
||||
rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
|
||||
rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
|
||||
} else
|
||||
rbuf = ring->buffer;
|
||||
rbuf = engine->buffer;
|
||||
|
||||
error->ring[i].cpu_ring_head = rbuf->head;
|
||||
error->ring[i].cpu_ring_tail = rbuf->tail;
|
||||
@ -1065,12 +1087,19 @@ static void i915_gem_record_rings(struct drm_device *dev,
|
||||
i915_error_ggtt_object_create(dev_priv, rbuf->obj);
|
||||
|
||||
error->ring[i].hws_page =
|
||||
i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
|
||||
i915_error_ggtt_object_create(dev_priv,
|
||||
engine->status_page.obj);
|
||||
|
||||
i915_gem_record_active_context(ring, error, &error->ring[i]);
|
||||
if (engine->wa_ctx.obj) {
|
||||
error->ring[i].wa_ctx =
|
||||
i915_error_ggtt_object_create(dev_priv,
|
||||
engine->wa_ctx.obj);
|
||||
}
|
||||
|
||||
i915_gem_record_active_context(engine, error, &error->ring[i]);
|
||||
|
||||
count = 0;
|
||||
list_for_each_entry(request, &ring->request_list, list)
|
||||
list_for_each_entry(request, &engine->request_list, list)
|
||||
count++;
|
||||
|
||||
error->ring[i].num_requests = count;
|
||||
@ -1083,7 +1112,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
|
||||
}
|
||||
|
||||
count = 0;
|
||||
list_for_each_entry(request, &ring->request_list, list) {
|
||||
list_for_each_entry(request, &engine->request_list, list) {
|
||||
struct drm_i915_error_request *erq;
|
||||
|
||||
if (count >= error->ring[i].num_requests) {
|
||||
@ -1272,7 +1301,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
|
||||
|
||||
static void i915_error_capture_msg(struct drm_device *dev,
|
||||
struct drm_i915_error_state *error,
|
||||
bool wedged,
|
||||
u32 engine_mask,
|
||||
const char *error_msg)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
@ -1295,7 +1324,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
|
||||
scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
|
||||
", reason: %s, action: %s",
|
||||
error_msg,
|
||||
wedged ? "reset" : "continue");
|
||||
engine_mask ? "reset" : "continue");
|
||||
}
|
||||
|
||||
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
|
||||
@ -1318,7 +1347,7 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
|
||||
* out a structure which becomes available in debugfs for user level tools
|
||||
* to pick up.
|
||||
*/
|
||||
void i915_capture_error_state(struct drm_device *dev, bool wedged,
|
||||
void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
|
||||
const char *error_msg)
|
||||
{
|
||||
static bool warned;
|
||||
@ -1346,7 +1375,7 @@ void i915_capture_error_state(struct drm_device *dev, bool wedged,
|
||||
error->overlay = intel_overlay_capture_error_state(dev);
|
||||
error->display = intel_display_capture_error_state(dev);
|
||||
|
||||
i915_error_capture_msg(dev, error, wedged, error_msg);
|
||||
i915_error_capture_msg(dev, error, engine_mask, error_msg);
|
||||
DRM_INFO("%s\n", error->error_msg);
|
||||
|
||||
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
|
||||
|
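Note on the error-capture hunks above: the series widens the old bool wedged into a u32 engine_mask so the capture and reset paths can record which engines actually hung. A minimal usage sketch follows; the per-engine status flags are hypothetical, while i915_handle_error(), intel_engine_flag() and dev_priv->engine[] all appear in the diff itself.

        u32 engine_mask = 0;

        if (render_hung)        /* hypothetical status flag */
                engine_mask |= intel_engine_flag(&dev_priv->engine[RCS]);
        if (blitter_hung)       /* hypothetical status flag */
                engine_mask |= intel_engine_flag(&dev_priv->engine[BCS]);

        /* Any non-zero mask still requests a reset, matching the
         * engine_mask ? "reset" : "continue" hunk above. */
        if (engine_mask)
                i915_handle_error(dev, engine_mask, "Engine(s) hung");
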
@ -377,11 +377,11 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
int i;
enum intel_engine_id id;

memset(&desc, 0, sizeof(desc));

@ -390,8 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;

for_each_ring(ring, dev_priv, i) {
struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
for_each_engine_id(engine, dev_priv, id) {
struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;

@ -402,27 +402,27 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to enter here.
*/
obj = ctx->engine[i].state;
obj = ctx->engine[id].state;
if (!obj)
break;	/* XXX: continue? */

ctx_desc = intel_lr_context_descriptor(ctx, ring);
ctx_desc = intel_lr_context_descriptor(ctx, engine);
lrc->context_desc = (u32)ctx_desc;

/* The state page is after PPHWSP */
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(ring->guc_id << GUC_ELC_ENGINE_OFFSET);
(engine->guc_id << GUC_ELC_ENGINE_OFFSET);

obj = ctx->engine[i].ringbuf->obj;
obj = ctx->engine[id].ringbuf->obj;

lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;

desc.engines_used |= (1 << ring->guc_id);
desc.engines_used |= (1 << engine->guc_id);
}

WARN_ON(desc.engines_used == 0);
@ -542,11 +542,12 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
wqi->header = WQ_TYPE_INORDER |
(wq_len << WQ_LEN_SHIFT) |
(rq->ring->guc_id << WQ_TARGET_SHIFT) |
(rq->engine->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;

/* The GuC wants only the low-order word of the context descriptor */
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
rq->engine);

/* The GuC firmware wants the tail index in QWords, not bytes */
tail = rq->ringbuf->tail >> 3;
@ -569,7 +570,7 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
struct intel_guc *guc = client->guc;
unsigned int engine_id = rq->ring->guc_id;
unsigned int engine_id = rq->engine->guc_id;
int q_ret, b_ret;

q_ret = guc_add_workqueue_item(client, rq);
@ -839,9 +840,9 @@ static void guc_create_ads(struct intel_guc *guc)
struct guc_ads *ads;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct page *page;
u32 size, i;
u32 size;

/* The ads obj includes the struct itself and buffers passed to GuC */
size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
@ -867,11 +868,11 @@ static void guc_create_ads(struct intel_guc *guc)
* so its address won't change after we've told the GuC where
* to find it.
*/
ring = &dev_priv->ring[RCS];
ads->golden_context_lrca = ring->status_page.gfx_addr;
engine = &dev_priv->engine[RCS];
ads->golden_context_lrca = engine->status_page.gfx_addr;

for_each_ring(ring, dev_priv, i)
ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
for_each_engine(engine, dev_priv)
ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);

/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
@ -883,12 +884,12 @@ static void guc_create_ads(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);

for_each_ring(ring, dev_priv, i) {
reg_state->mmio_white_list[ring->guc_id].mmio_start =
ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
for_each_engine(engine, dev_priv) {
reg_state->mmio_white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;

/* Nothing to be saved or restored for now. */
reg_state->mmio_white_list[ring->guc_id].count = 0;
reg_state->mmio_white_list[engine->guc_id].count = 0;
}

ads->reg_state_addr = ads->scheduler_policies +

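The GuC hunks above also show the new iterators that replace for_each_ring(ring, dev_priv, i). A short usage sketch, assuming the macro shapes implied by the diff (their definitions live elsewhere in the series): for_each_engine() drops the index entirely, and for_each_engine_id() supplies a typed enum intel_engine_id where an index is still needed.

        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, dev_priv)               /* no index needed */
                wake_up_all(&engine->irq_queue);

        for_each_engine_id(engine, dev_priv, id)        /* typed index replaces int i */
                pr_debug("engine %s has id %d\n", engine->name, id);
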
@ -994,14 +994,14 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
return;
}

static void notify_ring(struct intel_engine_cs *ring)
static void notify_ring(struct intel_engine_cs *engine)
{
if (!intel_ring_initialized(ring))
if (!intel_engine_initialized(engine))
return;

trace_i915_gem_request_notify(ring);
trace_i915_gem_request_notify(engine);

wake_up_all(&ring->irq_queue);
wake_up_all(&engine->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
@ -1079,11 +1079,10 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)

static bool any_waiters(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
int i;
struct intel_engine_cs *engine;

for_each_ring(ring, dev_priv, i)
if (ring->irq_refcount)
for_each_engine(engine, dev_priv)
if (engine->irq_refcount)
return true;

return false;
@ -1291,9 +1290,9 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
{
if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
notify_ring(&dev_priv->ring[RCS]);
notify_ring(&dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
notify_ring(&dev_priv->ring[VCS]);
notify_ring(&dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
@ -1303,11 +1302,11 @@ static void snb_gt_irq_handler(struct drm_device *dev,

if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
notify_ring(&dev_priv->ring[RCS]);
notify_ring(&dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
notify_ring(&dev_priv->ring[VCS]);
notify_ring(&dev_priv->engine[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
notify_ring(&dev_priv->ring[BCS]);
notify_ring(&dev_priv->engine[BCS]);

if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@ -1319,12 +1318,12 @@ static void snb_gt_irq_handler(struct drm_device *dev,
}

static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
notify_ring(ring);
notify_ring(engine);
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
intel_lrc_irq_handler(ring);
intel_lrc_irq_handler(engine);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
@ -1338,11 +1337,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GEN8_GT_IIR(0), iir);
ret = IRQ_HANDLED;

gen8_cs_irq_handler(&dev_priv->ring[RCS],
iir, GEN8_RCS_IRQ_SHIFT);
gen8_cs_irq_handler(&dev_priv->engine[RCS],
iir, GEN8_RCS_IRQ_SHIFT);

gen8_cs_irq_handler(&dev_priv->ring[BCS],
iir, GEN8_BCS_IRQ_SHIFT);
gen8_cs_irq_handler(&dev_priv->engine[BCS],
iir, GEN8_BCS_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
@ -1353,11 +1352,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GEN8_GT_IIR(1), iir);
ret = IRQ_HANDLED;

gen8_cs_irq_handler(&dev_priv->ring[VCS],
iir, GEN8_VCS1_IRQ_SHIFT);
gen8_cs_irq_handler(&dev_priv->engine[VCS],
iir, GEN8_VCS1_IRQ_SHIFT);

gen8_cs_irq_handler(&dev_priv->ring[VCS2],
iir, GEN8_VCS2_IRQ_SHIFT);
gen8_cs_irq_handler(&dev_priv->engine[VCS2],
iir, GEN8_VCS2_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
@ -1368,8 +1367,8 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GEN8_GT_IIR(3), iir);
ret = IRQ_HANDLED;

gen8_cs_irq_handler(&dev_priv->ring[VECS],
iir, GEN8_VECS_IRQ_SHIFT);
gen8_cs_irq_handler(&dev_priv->engine[VECS],
iir, GEN8_VECS_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
@ -1629,7 +1628,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)

if (HAS_VEBOX(dev_priv->dev)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(&dev_priv->ring[VECS]);
notify_ring(&dev_priv->engine[VECS]);

if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@ -2449,8 +2448,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
struct intel_engine_cs *ring;
int i;
struct intel_engine_cs *engine;

/*
* Notify all waiters for GPU completion events that reset state has
@ -2460,8 +2458,8 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
*/

/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
for_each_engine(engine, dev_priv)
wake_up_all(&engine->irq_queue);

/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
wake_up_all(&dev_priv->pending_flip_queue);
@ -2653,14 +2651,14 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
/**
* i915_handle_error - handle a gpu error
* @dev: drm device
*
* @engine_mask: mask representing engines that are hung
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
void i915_handle_error(struct drm_device *dev, bool wedged,
void i915_handle_error(struct drm_device *dev, u32 engine_mask,
const char *fmt, ...)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -2671,10 +2669,10 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);

i915_capture_error_state(dev, wedged, error_msg);
i915_capture_error_state(dev, engine_mask, error_msg);
i915_report_and_clear_eir(dev);

if (wedged) {
if (engine_mask) {
atomic_or(I915_RESET_IN_PROGRESS_FLAG,
&dev_priv->gpu_error.reset_counter);

@ -2805,10 +2803,10 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
}

static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
ring_idle(struct intel_engine_cs *engine, u32 seqno)
{
return (list_empty(&ring->request_list) ||
i915_seqno_passed(seqno, ring->last_submitted_seqno));
return (list_empty(&engine->request_list) ||
i915_seqno_passed(seqno, engine->last_submitted_seqno));
}

static bool
@ -2824,42 +2822,42 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
u64 offset)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct intel_engine_cs *signaller;
int i;

if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
for_each_ring(signaller, dev_priv, i) {
if (ring == signaller)
for_each_engine(signaller, dev_priv) {
if (engine == signaller)
continue;

if (offset == signaller->semaphore.signal_ggtt[ring->id])
if (offset == signaller->semaphore.signal_ggtt[engine->id])
return signaller;
}
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

for_each_ring(signaller, dev_priv, i) {
if(ring == signaller)
for_each_engine(signaller, dev_priv) {
if(engine == signaller)
continue;

if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
return signaller;
}
}

DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
ring->id, ipehr, offset);
engine->id, ipehr, offset);

return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_private *dev_priv = engine->dev->dev_private;
u32 cmd, ipehr, head;
u64 offset = 0;
int i, backwards;
@ -2881,11 +2879,11 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
* Therefore, this function does not support execlist mode in its
* current form. Just return NULL and move on.
*/
if (ring->buffer == NULL)
if (engine->buffer == NULL)
return NULL;

ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
return NULL;

/*
@ -2896,8 +2894,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
* point at at batch, and semaphores are always emitted into the
* ringbuffer itself.
*/
head = I915_READ_HEAD(ring) & HEAD_ADDR;
backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
head = I915_READ_HEAD(engine) & HEAD_ADDR;
backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;

for (i = backwards; i; --i) {
/*
@ -2905,10 +2903,10 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
* our ring is smaller than what the hardware (and hence
* HEAD_ADDR) allows. Also handles wrap-around.
*/
head &= ring->buffer->size - 1;
head &= engine->buffer->size - 1;

/* This here seems to blow up */
cmd = ioread32(ring->buffer->virtual_start + head);
cmd = ioread32(engine->buffer->virtual_start + head);
if (cmd == ipehr)
break;

@ -2918,29 +2916,29 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
if (!i)
return NULL;

*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
if (INTEL_INFO(ring->dev)->gen >= 8) {
offset = ioread32(ring->buffer->virtual_start + head + 12);
*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
if (INTEL_INFO(engine->dev)->gen >= 8) {
offset = ioread32(engine->buffer->virtual_start + head + 12);
offset <<= 32;
offset = ioread32(ring->buffer->virtual_start + head + 8);
offset = ioread32(engine->buffer->virtual_start + head + 8);
}
return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}

static int semaphore_passed(struct intel_engine_cs *ring)
static int semaphore_passed(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct intel_engine_cs *signaller;
u32 seqno;

ring->hangcheck.deadlock++;
engine->hangcheck.deadlock++;

signaller = semaphore_waits_for(ring, &seqno);
signaller = semaphore_waits_for(engine, &seqno);
if (signaller == NULL)
return -1;

/* Prevent pathological recursion due to driver bugs */
if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
return -1;

if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
@ -2956,23 +2954,22 @@ static int semaphore_passed(struct intel_engine_cs *ring)

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
int i;
struct intel_engine_cs *engine;

for_each_ring(ring, dev_priv, i)
ring->hangcheck.deadlock = 0;
for_each_engine(engine, dev_priv)
engine->hangcheck.deadlock = 0;
}

static bool subunits_stuck(struct intel_engine_cs *ring)
static bool subunits_stuck(struct intel_engine_cs *engine)
{
u32 instdone[I915_NUM_INSTDONE_REG];
bool stuck;
int i;

if (ring->id != RCS)
if (engine->id != RCS)
return true;

i915_get_extra_instdone(ring->dev, instdone);
i915_get_extra_instdone(engine->dev, instdone);

/* There might be unstable subunit states even when
* actual head is not moving. Filter out the unstable ones by
@ -2981,49 +2978,44 @@ static bool subunits_stuck(struct intel_engine_cs *ring)
*/
stuck = true;
for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];

if (tmp != ring->hangcheck.instdone[i])
if (tmp != engine->hangcheck.instdone[i])
stuck = false;

ring->hangcheck.instdone[i] |= tmp;
engine->hangcheck.instdone[i] |= tmp;
}

return stuck;
}

static enum intel_ring_hangcheck_action
head_stuck(struct intel_engine_cs *ring, u64 acthd)
head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
if (acthd != ring->hangcheck.acthd) {
if (acthd != engine->hangcheck.acthd) {

/* Clear subunit states on head movement */
memset(ring->hangcheck.instdone, 0,
sizeof(ring->hangcheck.instdone));
memset(engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));

if (acthd > ring->hangcheck.max_acthd) {
ring->hangcheck.max_acthd = acthd;
return HANGCHECK_ACTIVE;
}

return HANGCHECK_ACTIVE_LOOP;
return HANGCHECK_ACTIVE;
}

if (!subunits_stuck(ring))
if (!subunits_stuck(engine))
return HANGCHECK_ACTIVE;

return HANGCHECK_HUNG;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
ring_stuck(struct intel_engine_cs *engine, u64 acthd)
{
struct drm_device *dev = ring->dev;
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_ring_hangcheck_action ha;
u32 tmp;

ha = head_stuck(ring, acthd);
ha = head_stuck(engine, acthd);
if (ha != HANGCHECK_HUNG)
return ha;

@ -3035,24 +3027,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
tmp = I915_READ_CTL(ring);
tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
i915_handle_error(dev, false,
i915_handle_error(dev, 0,
"Kicking stuck wait on %s",
ring->name);
I915_WRITE_CTL(ring, tmp);
engine->name);
I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
}

if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(ring)) {
switch (semaphore_passed(engine)) {
default:
return HANGCHECK_HUNG;
case 1:
i915_handle_error(dev, false,
i915_handle_error(dev, 0,
"Kicking stuck semaphore on %s",
ring->name);
I915_WRITE_CTL(ring, tmp);
engine->name);
I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
case 0:
return HANGCHECK_WAIT;
@ -3076,13 +3068,14 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *ring;
int i;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int busy_count = 0, rings_hung = 0;
bool stuck[I915_NUM_RINGS] = { 0 };
bool stuck[I915_NUM_ENGINES] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define ACTIVE_DECAY 15

if (!i915.enable_hangcheck)
return;
@ -3100,33 +3093,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

for_each_ring(ring, dev_priv, i) {
for_each_engine_id(engine, dev_priv, id) {
u64 acthd;
u32 seqno;
bool busy = true;

semaphore_clear_deadlocks(dev_priv);

seqno = ring->get_seqno(ring, false);
acthd = intel_ring_get_active_head(ring);
seqno = engine->get_seqno(engine, false);
acthd = intel_ring_get_active_head(engine);

if (ring->hangcheck.seqno == seqno) {
if (ring_idle(ring, seqno)) {
ring->hangcheck.action = HANGCHECK_IDLE;
if (engine->hangcheck.seqno == seqno) {
if (ring_idle(engine, seqno)) {
engine->hangcheck.action = HANGCHECK_IDLE;

if (waitqueue_active(&ring->irq_queue)) {
if (waitqueue_active(&engine->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
if (!test_and_set_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings)) {
if (!(dev_priv->gpu_error.test_irq_rings & intel_engine_flag(engine)))
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
ring->name);
engine->name);
else
DRM_INFO("Fake missed irq on %s\n",
ring->name);
wake_up_all(&ring->irq_queue);
engine->name);
wake_up_all(&engine->irq_queue);
}
/* Safeguard against driver failure */
ring->hangcheck.score += BUSY;
engine->hangcheck.score += BUSY;
} else
busy = false;
} else {
@ -3145,58 +3138,59 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
ring->hangcheck.action = ring_stuck(ring,
acthd);
engine->hangcheck.action = ring_stuck(engine,
acthd);

switch (ring->hangcheck.action) {
switch (engine->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
case HANGCHECK_ACTIVE:
break;
case HANGCHECK_ACTIVE_LOOP:
ring->hangcheck.score += BUSY;
case HANGCHECK_ACTIVE:
engine->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
ring->hangcheck.score += KICK;
engine->hangcheck.score += KICK;
break;
case HANGCHECK_HUNG:
ring->hangcheck.score += HUNG;
stuck[i] = true;
engine->hangcheck.score += HUNG;
stuck[id] = true;
break;
}
}
} else {
ring->hangcheck.action = HANGCHECK_ACTIVE;
engine->hangcheck.action = HANGCHECK_ACTIVE;

/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
if (ring->hangcheck.score > 0)
ring->hangcheck.score--;
if (engine->hangcheck.score > 0)
engine->hangcheck.score -= ACTIVE_DECAY;
if (engine->hangcheck.score < 0)
engine->hangcheck.score = 0;

/* Clear head and subunit states on seqno movement */
ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
engine->hangcheck.acthd = 0;

memset(ring->hangcheck.instdone, 0,
sizeof(ring->hangcheck.instdone));
memset(engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
}

ring->hangcheck.seqno = seqno;
ring->hangcheck.acthd = acthd;
engine->hangcheck.seqno = seqno;
engine->hangcheck.acthd = acthd;
busy_count += busy;
}

for_each_ring(ring, dev_priv, i) {
if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
for_each_engine_id(engine, dev_priv, id) {
if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
DRM_INFO("%s on %s\n",
stuck[i] ? "stuck" : "no progress",
ring->name);
rings_hung++;
stuck[id] ? "stuck" : "no progress",
engine->name);
rings_hung |= intel_engine_flag(engine);
}
}

if (rings_hung) {
i915_handle_error(dev, true, "Ring hung");
i915_handle_error(dev, rings_hung, "Engine(s) hung");
goto out;
}

@ -4044,7 +4038,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
new_iir = I915_READ16(IIR); /* Flush posted writes */

if (iir & I915_USER_INTERRUPT)
notify_ring(&dev_priv->ring[RCS]);
notify_ring(&dev_priv->engine[RCS]);

for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@ -4240,7 +4234,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */

if (iir & I915_USER_INTERRUPT)
notify_ring(&dev_priv->ring[RCS]);
notify_ring(&dev_priv->engine[RCS]);

for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@ -4470,9 +4464,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */

if (iir & I915_USER_INTERRUPT)
notify_ring(&dev_priv->ring[RCS]);
notify_ring(&dev_priv->engine[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(&dev_priv->ring[VCS]);
notify_ring(&dev_priv->engine[VCS]);

for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@ -4567,8 +4561,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);

pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

if (IS_GEN2(dev_priv)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;

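Two behavioural changes hide in the hangcheck hunks above: a busy engine's score now decays by ACTIVE_DECAY (15) per sample instead of by 1, clamped at zero, and rings_hung is built up with intel_engine_flag() so the error report carries a mask of hung engines rather than a count. A condensed sketch of the new decay rule; the helper name is an assumption for illustration.

        #define ACTIVE_DECAY 15

        /* hypothetical helper condensing the else-branch above */
        static void hangcheck_score_decay(struct intel_engine_cs *engine)
        {
                if (engine->hangcheck.score > 0)
                        engine->hangcheck.score -= ACTIVE_DECAY;
                if (engine->hangcheck.score < 0)
                        engine->hangcheck.score = 0;
        }
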
@ -56,6 +56,8 @@ struct i915_params i915 __read_mostly = {
.edp_vswing = 0,
.enable_guc_submission = false,
.guc_log_level = -1,
.enable_dp_mst = true,
.inject_load_failure = 0,
};

module_param_named(modeset, i915.modeset, int, 0400);
@ -201,3 +203,10 @@ MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)")
module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");

module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
MODULE_PARM_DESC(enable_dp_mst,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
MODULE_PARM_DESC(inject_load_failure,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");

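The new i915.inject_load_failure parameter above only declares the knob; the check points that consume it live in the driver-load refactor elsewhere in this series. A hypothetical sketch of how such a check point could consult it, with the helper name and counter being assumptions and only the module parameter itself coming from this hunk:

        static unsigned int load_fail_count;    /* hypothetical counter */

        static bool inject_load_failure(void)   /* hypothetical helper */
        {
                if (i915.inject_load_failure == 0)
                        return false;   /* 0: injection disabled, per the description */
                return ++load_fail_count == i915.inject_load_failure;
        }
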
@ -49,6 +49,7 @@ struct i915_params {
int use_mmio_flip;
int mmio_debug;
int edp_vswing;
unsigned int inject_load_failure;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
@ -59,6 +60,7 @@ struct i915_params {
bool enable_guc_submission;
bool verbose_state_checks;
bool nuclear_pageflip;
bool enable_dp_mst;
};

extern struct i915_params i915 __read_mostly;

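The comment in the struct above encodes a packing rule worth noting: on common ABIs a bool is one byte, so interleaving bools with ints forces the compiler to insert padding holes. Keeping all bools contiguous at the end, and placing the new unsigned int with the other non-bool members, keeps the structure dense. An illustrative comparison, not from the source:

        struct bad_layout {     /* hypothetical */
                bool a;         /* 1 byte + 3 bytes padding */
                int x;
                bool b;         /* 1 byte + 3 bytes padding */
                int y;
        };                      /* 16 bytes on common ABIs */

        struct good_layout {    /* hypothetical */
                int x;
                int y;
                bool a, b;      /* packed together, 2 bytes + tail padding */
        };                      /* 12 bytes on common ABIs */
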
@ -164,6 +164,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
#define GEN6_GRDOM_RENDER (1 << 1)
|
||||
#define GEN6_GRDOM_MEDIA (1 << 2)
|
||||
#define GEN6_GRDOM_BLT (1 << 3)
|
||||
#define GEN6_GRDOM_VECS (1 << 4)
|
||||
#define GEN8_GRDOM_MEDIA2 (1 << 7)
|
||||
|
||||
#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228)
|
||||
#define RING_PP_DIR_BASE_READ(ring) _MMIO((ring)->mmio_base+0x518)
|
||||
@ -586,6 +588,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504)
|
||||
#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508)
|
||||
|
||||
/* There are the 16 64-bit CS General Purpose Registers */
|
||||
#define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8)
|
||||
#define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4)
|
||||
|
||||
#define OACONTROL _MMIO(0x2360)
|
||||
|
||||
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
|
||||
@ -786,6 +792,7 @@ enum skl_disp_power_wells {
|
||||
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
|
||||
#define CCK_CZ_CLOCK_CONTROL 0x62
|
||||
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
|
||||
#define CCK_DISPLAY_REF_CLOCK_CONTROL 0x6c
|
||||
#define CCK_TRUNK_FORCE_ON (1 << 17)
|
||||
#define CCK_TRUNK_FORCE_OFF (1 << 16)
|
||||
#define CCK_FREQUENCY_STATUS (0x1f << 8)
|
||||
@ -1795,6 +1802,7 @@ enum skl_disp_power_wells {
|
||||
|
||||
#define VLV_DISPLAY_BASE 0x180000
|
||||
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
|
||||
#define BXT_MIPI_BASE 0x60000
|
||||
|
||||
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
|
||||
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
|
||||
@ -7102,6 +7110,7 @@ enum skl_disp_power_wells {
|
||||
#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
|
||||
|
||||
#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
|
||||
#define FLOW_CONTROL_ENABLE (1<<15)
|
||||
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
|
||||
#define STALL_DOP_GATING_DISABLE (1<<5)
|
||||
|
||||
@ -7362,9 +7371,11 @@ enum skl_disp_power_wells {
|
||||
/* SBI offsets */
|
||||
#define SBI_SSCDIVINTPHASE 0x0200
|
||||
#define SBI_SSCDIVINTPHASE6 0x0600
|
||||
#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
|
||||
#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
|
||||
#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f<<1)
|
||||
#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
|
||||
#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
|
||||
#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
|
||||
#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f<<8)
|
||||
#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
|
||||
#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
|
||||
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
|
||||
@ -7374,6 +7385,8 @@ enum skl_disp_power_wells {
|
||||
#define SBI_SSCCTL_PATHALT (1<<3)
|
||||
#define SBI_SSCCTL_DISABLE (1<<0)
|
||||
#define SBI_SSCAUXDIV6 0x0610
|
||||
#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
|
||||
#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1<<4)
|
||||
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
|
||||
#define SBI_DBUFF0 0x2a00
|
||||
#define SBI_GEN0 0x1f00
|
||||
@ -7651,6 +7664,59 @@ enum skl_disp_power_wells {
|
||||
#define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
|
||||
#define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
|
||||
|
||||
/* pipe degamma/gamma LUTs on IVB+ */
|
||||
#define _PAL_PREC_INDEX_A 0x4A400
|
||||
#define _PAL_PREC_INDEX_B 0x4AC00
|
||||
#define _PAL_PREC_INDEX_C 0x4B400
|
||||
#define PAL_PREC_10_12_BIT (0 << 31)
|
||||
#define PAL_PREC_SPLIT_MODE (1 << 31)
|
||||
#define PAL_PREC_AUTO_INCREMENT (1 << 15)
|
||||
#define _PAL_PREC_DATA_A 0x4A404
|
||||
#define _PAL_PREC_DATA_B 0x4AC04
|
||||
#define _PAL_PREC_DATA_C 0x4B404
|
||||
#define _PAL_PREC_GC_MAX_A 0x4A410
|
||||
#define _PAL_PREC_GC_MAX_B 0x4AC10
|
||||
#define _PAL_PREC_GC_MAX_C 0x4B410
|
||||
#define _PAL_PREC_EXT_GC_MAX_A 0x4A420
|
||||
#define _PAL_PREC_EXT_GC_MAX_B 0x4AC20
|
||||
#define _PAL_PREC_EXT_GC_MAX_C 0x4B420
|
||||
|
||||
#define PREC_PAL_INDEX(pipe) _MMIO_PIPE(pipe, _PAL_PREC_INDEX_A, _PAL_PREC_INDEX_B)
|
||||
#define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
|
||||
#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
|
||||
#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
|
||||
|
||||
/* pipe CSC & degamma/gamma LUTs on CHV */
|
||||
#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
|
||||
#define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904)
|
||||
#define _CGM_PIPE_A_CSC_COEFF45 (VLV_DISPLAY_BASE + 0x67908)
|
||||
#define _CGM_PIPE_A_CSC_COEFF67 (VLV_DISPLAY_BASE + 0x6790C)
|
||||
#define _CGM_PIPE_A_CSC_COEFF8 (VLV_DISPLAY_BASE + 0x67910)
|
||||
#define _CGM_PIPE_A_DEGAMMA (VLV_DISPLAY_BASE + 0x66000)
|
||||
#define _CGM_PIPE_A_GAMMA (VLV_DISPLAY_BASE + 0x67000)
|
||||
#define _CGM_PIPE_A_MODE (VLV_DISPLAY_BASE + 0x67A00)
|
||||
#define CGM_PIPE_MODE_GAMMA (1 << 2)
|
||||
#define CGM_PIPE_MODE_CSC (1 << 1)
|
||||
#define CGM_PIPE_MODE_DEGAMMA (1 << 0)
|
||||
|
||||
#define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900)
|
||||
#define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904)
|
||||
#define _CGM_PIPE_B_CSC_COEFF45 (VLV_DISPLAY_BASE + 0x69908)
|
||||
#define _CGM_PIPE_B_CSC_COEFF67 (VLV_DISPLAY_BASE + 0x6990C)
|
||||
#define _CGM_PIPE_B_CSC_COEFF8 (VLV_DISPLAY_BASE + 0x69910)
|
||||
#define _CGM_PIPE_B_DEGAMMA (VLV_DISPLAY_BASE + 0x68000)
|
||||
#define _CGM_PIPE_B_GAMMA (VLV_DISPLAY_BASE + 0x69000)
|
||||
#define _CGM_PIPE_B_MODE (VLV_DISPLAY_BASE + 0x69A00)
|
||||
|
||||
#define CGM_PIPE_CSC_COEFF01(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF01, _CGM_PIPE_B_CSC_COEFF01)
|
||||
#define CGM_PIPE_CSC_COEFF23(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF23, _CGM_PIPE_B_CSC_COEFF23)
|
||||
#define CGM_PIPE_CSC_COEFF45(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF45, _CGM_PIPE_B_CSC_COEFF45)
|
||||
#define CGM_PIPE_CSC_COEFF67(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF67, _CGM_PIPE_B_CSC_COEFF67)
|
||||
#define CGM_PIPE_CSC_COEFF8(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF8, _CGM_PIPE_B_CSC_COEFF8)
|
||||
#define CGM_PIPE_DEGAMMA(pipe, i, w) _MMIO(_PIPE(pipe, _CGM_PIPE_A_DEGAMMA, _CGM_PIPE_B_DEGAMMA) + (i) * 8 + (w) * 4)
|
||||
#define CGM_PIPE_GAMMA(pipe, i, w) _MMIO(_PIPE(pipe, _CGM_PIPE_A_GAMMA, _CGM_PIPE_B_GAMMA) + (i) * 8 + (w) * 4)
|
||||
#define CGM_PIPE_MODE(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_MODE, _CGM_PIPE_B_MODE)
|
||||
|
||||
/* MIPI DSI registers */
|
||||
|
||||
#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
|
||||
@ -7665,58 +7731,62 @@ enum skl_disp_power_wells {
|
||||
#define BXT_MIPI_DIV_SHIFT(port) \
|
||||
_MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \
|
||||
BXT_MIPI2_DIV_SHIFT)
|
/* Var clock divider to generate TX source. Result must be < 39.5 M */
#define  BXT_MIPI1_ESCLK_VAR_DIV_MASK		(0x3F << 26)
#define  BXT_MIPI2_ESCLK_VAR_DIV_MASK		(0x3F << 10)
#define  BXT_MIPI_ESCLK_VAR_DIV_MASK(port)	\
		_MIPI_PORT(port, BXT_MIPI1_ESCLK_VAR_DIV_MASK, \
				BXT_MIPI2_ESCLK_VAR_DIV_MASK)

#define  BXT_MIPI_ESCLK_VAR_DIV(port, val)	\
		(val << BXT_MIPI_DIV_SHIFT(port))
/* TX control divider to select actual TX clock output from (8x/var) */
#define  BXT_MIPI1_TX_ESCLK_SHIFT		21
#define  BXT_MIPI2_TX_ESCLK_SHIFT		5
#define  BXT_MIPI1_TX_ESCLK_SHIFT		26
#define  BXT_MIPI2_TX_ESCLK_SHIFT		10
#define  BXT_MIPI_TX_ESCLK_SHIFT(port)		\
		_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \
				BXT_MIPI2_TX_ESCLK_SHIFT)
#define  BXT_MIPI1_TX_ESCLK_FIXDIV_MASK		(3 << 21)
#define  BXT_MIPI2_TX_ESCLK_FIXDIV_MASK		(3 << 5)
#define  BXT_MIPI1_TX_ESCLK_FIXDIV_MASK		(0x3F << 26)
#define  BXT_MIPI2_TX_ESCLK_FIXDIV_MASK		(0x3F << 10)
#define  BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)	\
		_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
				BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
#define  BXT_MIPI_TX_ESCLK_8XDIV_BY2(port)	\
		(0x0 << BXT_MIPI_TX_ESCLK_SHIFT(port))
#define  BXT_MIPI_TX_ESCLK_8XDIV_BY4(port)	\
		(0x1 << BXT_MIPI_TX_ESCLK_SHIFT(port))
#define  BXT_MIPI_TX_ESCLK_8XDIV_BY8(port)	\
		(0x2 << BXT_MIPI_TX_ESCLK_SHIFT(port))
/* RX control divider to select actual RX clock output from 8x */
#define  BXT_MIPI1_RX_ESCLK_SHIFT		19
#define  BXT_MIPI2_RX_ESCLK_SHIFT		3
#define  BXT_MIPI_RX_ESCLK_SHIFT(port)		\
		_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_SHIFT, \
				BXT_MIPI2_RX_ESCLK_SHIFT)
#define  BXT_MIPI1_RX_ESCLK_FIXDIV_MASK		(3 << 19)
#define  BXT_MIPI2_RX_ESCLK_FIXDIV_MASK		(3 << 3)
#define  BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port)	\
		(3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
#define  BXT_MIPI_RX_ESCLK_8X_BY2(port)		\
		(1 << BXT_MIPI_RX_ESCLK_SHIFT(port))
#define  BXT_MIPI_RX_ESCLK_8X_BY3(port)		\
		(2 << BXT_MIPI_RX_ESCLK_SHIFT(port))
#define  BXT_MIPI_RX_ESCLK_8X_BY4(port)		\
		(3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
/* BXT-A WA: Always prog DPHY dividers to 00 */
#define  BXT_MIPI1_DPHY_DIV_SHIFT		16
#define  BXT_MIPI2_DPHY_DIV_SHIFT		0
#define  BXT_MIPI_DPHY_DIV_SHIFT(port)		\
		_MIPI_PORT(port, BXT_MIPI1_DPHY_DIV_SHIFT, \
				BXT_MIPI2_DPHY_DIV_SHIFT)
#define  BXT_MIPI_1_DPHY_DIVIDER_MASK		(3 << 16)
#define  BXT_MIPI_2_DPHY_DIVIDER_MASK		(3 << 0)
#define  BXT_MIPI_DPHY_DIVIDER_MASK(port)	\
		(3 << BXT_MIPI_DPHY_DIV_SHIFT(port))
				BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
#define  BXT_MIPI_TX_ESCLK_DIVIDER(port, val)	\
		((val & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
/* RX upper control divider to select actual RX clock output from 8x */
#define  BXT_MIPI1_RX_ESCLK_UPPER_SHIFT		21
#define  BXT_MIPI2_RX_ESCLK_UPPER_SHIFT		5
#define  BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port)	\
		_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_SHIFT, \
				BXT_MIPI2_RX_ESCLK_UPPER_SHIFT)
#define  BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK	(3 << 21)
#define  BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK	(3 << 5)
#define  BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port)	\
		_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
				BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
#define  BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val)	\
		((val & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
/* 8/3X divider to select the actual 8/3X clock output from 8x */
#define  BXT_MIPI1_8X_BY3_SHIFT			19
#define  BXT_MIPI2_8X_BY3_SHIFT			3
#define  BXT_MIPI_8X_BY3_SHIFT(port)		\
		_MIPI_PORT(port, BXT_MIPI1_8X_BY3_SHIFT, \
				BXT_MIPI2_8X_BY3_SHIFT)
#define  BXT_MIPI1_8X_BY3_DIVIDER_MASK		(3 << 19)
#define  BXT_MIPI2_8X_BY3_DIVIDER_MASK		(3 << 3)
#define  BXT_MIPI_8X_BY3_DIVIDER_MASK(port)	\
		_MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
				BXT_MIPI2_8X_BY3_DIVIDER_MASK)
#define  BXT_MIPI_8X_BY3_DIVIDER(port, val)	\
		((val & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
/* RX lower control divider to select actual RX clock output from 8x */
#define  BXT_MIPI1_RX_ESCLK_LOWER_SHIFT		16
#define  BXT_MIPI2_RX_ESCLK_LOWER_SHIFT		0
#define  BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port)	\
		_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_SHIFT, \
				BXT_MIPI2_RX_ESCLK_LOWER_SHIFT)
#define  BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK	(3 << 16)
#define  BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK	(3 << 0)
#define  BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port)	\
		_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
				BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
#define  BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val)	\
		((val & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))

#define RX_DIVIDER_BIT_1_2			0x3
#define RX_DIVIDER_BIT_3_4			0xC
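A minimal sketch of how one of the divider fields above would be programmed,
as a read-modify-write of the clock control register (illustrative only; the
helper name is made up, and BXT_MIPI_CLOCK_CTL plus the I915_READ/I915_WRITE
accessors are assumed from elsewhere in the driver):

static void bxt_program_esclk_var_div(struct drm_i915_private *dev_priv,
				      enum port port, u32 div)
{
	u32 tmp = I915_READ(BXT_MIPI_CLOCK_CTL);

	/* Clear the old divider for this port, then set the new one. */
	tmp &= ~BXT_MIPI_ESCLK_VAR_DIV_MASK(port);
	tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, div);
	I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}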

/* BXT MIPI mode configure */
#define  _BXT_MIPIA_TRANS_HACTIVE		0x6B0F8
@@ -7741,9 +7811,11 @@ enum skl_disp_power_wells {
#define  BXT_DSIC_16X_BY2			(1 << 10)
#define  BXT_DSIC_16X_BY3			(2 << 10)
#define  BXT_DSIC_16X_BY4			(3 << 10)
#define  BXT_DSIC_16X_MASK			(3 << 10)
#define  BXT_DSIA_16X_BY2			(1 << 8)
#define  BXT_DSIA_16X_BY3			(2 << 8)
#define  BXT_DSIA_16X_BY4			(3 << 8)
#define  BXT_DSIA_16X_MASK			(3 << 8)
#define  BXT_DSI_FREQ_SEL_SHIFT			8
#define  BXT_DSI_FREQ_SEL_MASK			(0xF << BXT_DSI_FREQ_SEL_SHIFT)

@@ -7878,8 +7950,8 @@ enum skl_disp_power_wells {
#define  VID_MODE_FORMAT_MASK			(0xf << 7)
#define  VID_MODE_NOT_SUPPORTED			(0 << 7)
#define  VID_MODE_FORMAT_RGB565			(1 << 7)
#define  VID_MODE_FORMAT_RGB666			(2 << 7)
#define  VID_MODE_FORMAT_RGB666_LOOSE		(3 << 7)
#define  VID_MODE_FORMAT_RGB666_PACKED		(2 << 7)
#define  VID_MODE_FORMAT_RGB666			(3 << 7)
#define  VID_MODE_FORMAT_RGB888			(4 << 7)
#define  CMD_MODE_CHANNEL_NUMBER_SHIFT		5
#define  CMD_MODE_CHANNEL_NUMBER_MASK		(3 << 5)
@@ -8135,6 +8207,7 @@ enum skl_disp_power_wells {
#define  READ_REQUEST_PRIORITY_HIGH		(3 << 3)
#define  RGB_FLIP_TO_BGR			(1 << 2)

#define  BXT_PIPE_SELECT_SHIFT			7
#define  BXT_PIPE_SELECT_MASK			(7 << 7)
#define  BXT_PIPE_SELECT(pipe)			((pipe) << 7)
@@ -370,6 +370,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);
@@ -378,6 +380,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

@@ -398,6 +401,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return count;
}

@@ -433,6 +438,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);
@@ -441,6 +448,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

@@ -457,6 +465,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return count;

}
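Both sysfs store paths above gain the same shape: take a runtime-PM reference
before grabbing rps.hw_lock and drop it on every exit path. A condensed,
illustrative sketch of that resulting shape (not the full functions; the
helper name is made up):

static ssize_t freq_store_pattern(struct drm_i915_private *dev_priv, u32 val)
{
	ssize_t ret = 0;

	intel_runtime_pm_get(dev_priv);		/* keep the device awake */
	mutex_lock(&dev_priv->rps.hw_lock);

	if (val > dev_priv->rps.max_freq)	/* range check under the lock */
		ret = -EINVAL;

	mutex_unlock(&dev_priv->rps.hw_lock);
	intel_runtime_pm_put(dev_priv);		/* paired put on all paths */

	return ret;
}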
@@ -464,7 +464,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
	    TP_fast_assign(
			   __entry->dev = from->dev->primary->index;
			   __entry->sync_from = from->id;
			   __entry->sync_to = to_req->ring->id;
			   __entry->sync_to = to_req->engine->id;
			   __entry->seqno = i915_gem_request_get_seqno(req);
			   ),

@@ -486,13 +486,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
	    ),

	    TP_fast_assign(
			   struct intel_engine_cs *ring =
						i915_gem_request_get_ring(req);
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   struct intel_engine_cs *engine =
						i915_gem_request_get_engine(req);
			   __entry->dev = engine->dev->primary->index;
			   __entry->ring = engine->id;
			   __entry->seqno = i915_gem_request_get_seqno(req);
			   __entry->flags = flags;
			   i915_trace_irq_get(ring, req);
			   i915_trace_irq_get(engine, req);
			   ),

	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,8 +511,8 @@ TRACE_EVENT(i915_gem_ring_flush,
	    ),

	    TP_fast_assign(
			   __entry->dev = req->ring->dev->primary->index;
			   __entry->ring = req->ring->id;
			   __entry->dev = req->engine->dev->primary->index;
			   __entry->ring = req->engine->id;
			   __entry->invalidate = invalidate;
			   __entry->flush = flush;
			   ),
@@ -533,10 +533,10 @@ DECLARE_EVENT_CLASS(i915_gem_request,
	    ),

	    TP_fast_assign(
			   struct intel_engine_cs *ring =
						i915_gem_request_get_ring(req);
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   struct intel_engine_cs *engine =
						i915_gem_request_get_engine(req);
			   __entry->dev = engine->dev->primary->index;
			   __entry->ring = engine->id;
			   __entry->seqno = i915_gem_request_get_seqno(req);
			   ),

@@ -550,8 +550,8 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
);

TRACE_EVENT(i915_gem_request_notify,
	    TP_PROTO(struct intel_engine_cs *ring),
	    TP_ARGS(ring),
	    TP_PROTO(struct intel_engine_cs *engine),
	    TP_ARGS(engine),

	    TP_STRUCT__entry(
			     __field(u32, dev)
@@ -560,9 +560,9 @@ TRACE_EVENT(i915_gem_request_notify,
	    ),

	    TP_fast_assign(
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   __entry->seqno = ring->get_seqno(ring, false);
			   __entry->dev = engine->dev->primary->index;
			   __entry->ring = engine->id;
			   __entry->seqno = engine->get_seqno(engine, false);
			   ),

	    TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -597,13 +597,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
	     * less desirable.
	     */
	    TP_fast_assign(
			   struct intel_engine_cs *ring =
						i915_gem_request_get_ring(req);
			   __entry->dev = ring->dev->primary->index;
			   __entry->ring = ring->id;
			   struct intel_engine_cs *engine =
						i915_gem_request_get_engine(req);
			   __entry->dev = engine->dev->primary->index;
			   __entry->ring = engine->id;
			   __entry->seqno = i915_gem_request_get_seqno(req);
			   __entry->blocking =
				     mutex_is_locked(&ring->dev->struct_mutex);
				     mutex_is_locked(&engine->dev->struct_mutex);
			   ),

	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -777,9 +777,9 @@ DEFINE_EVENT(i915_context, i915_context_free,
 * called only if full ppgtt is enabled.
 */
TRACE_EVENT(switch_mm,
	TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
	TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),

	TP_ARGS(ring, to),
	TP_ARGS(engine, to),

	TP_STRUCT__entry(
			__field(u32, ring)
@@ -789,10 +789,10 @@ TRACE_EVENT(switch_mm,
	),

	TP_fast_assign(
			__entry->ring = ring->id;
			__entry->ring = engine->id;
			__entry->to = to;
			__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
			__entry->dev = ring->dev->primary->index;
			__entry->dev = engine->dev->primary->index;
	),

	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
@@ -181,7 +181,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
int intel_vgt_balloon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
	unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;

	unsigned long mappable_base, mappable_size, mappable_end;
@@ -203,18 +203,18 @@ int intel_vgt_balloon(struct drm_device *dev)
		  unmappable_base, unmappable_size / 1024);

	if (mappable_base < ggtt_vm->start ||
	    mappable_end > dev_priv->gtt.mappable_end ||
	    unmappable_base < dev_priv->gtt.mappable_end ||
	    mappable_end > dev_priv->ggtt.mappable_end ||
	    unmappable_base < dev_priv->ggtt.mappable_end ||
	    unmappable_end > ggtt_vm_end) {
		DRM_ERROR("Invalid ballooning configuration!\n");
		return -EINVAL;
	}

	/* Unmappable graphic memory ballooning */
	if (unmappable_base > dev_priv->gtt.mappable_end) {
	if (unmappable_base > dev_priv->ggtt.mappable_end) {
		ret = vgt_balloon_space(&ggtt_vm->mm,
					&bl_info.space[2],
					dev_priv->gtt.mappable_end,
					dev_priv->ggtt.mappable_end,
					unmappable_base);

		if (ret)
@@ -244,11 +244,11 @@ int intel_vgt_balloon(struct drm_device *dev)
			goto err;
	}

	if (mappable_end < dev_priv->gtt.mappable_end) {
	if (mappable_end < dev_priv->ggtt.mappable_end) {
		ret = vgt_balloon_space(&ggtt_vm->mm,
					&bl_info.space[1],
					mappable_end,
					dev_priv->gtt.mappable_end);
					dev_priv->ggtt.mappable_end);

		if (ret)
			goto err;
@@ -96,8 +96,11 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
	crtc_state->update_pipe = false;
	crtc_state->disable_lp_wm = false;
	crtc_state->disable_cxsr = false;
	crtc_state->wm_changed = false;
	crtc_state->update_wm_pre = false;
	crtc_state->update_wm_post = false;
	crtc_state->fb_changed = false;
	crtc_state->wm.need_postvbl_update = false;
	crtc_state->fb_bits = 0;

	return &crtc_state->base;
}
@@ -195,12 +195,10 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane->state);
	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_existing_crtc_state(old_state->state, crtc);

	if (intel_state->visible)
		intel_plane->update_plane(plane,
					  to_intel_crtc_state(crtc_state),
					  to_intel_crtc_state(crtc->state),
					  intel_state);
	else
		intel_plane->disable_plane(plane, crtc);
@@ -564,23 +564,21 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
}

/**
 * intel_init_audio - Set up chip specific audio functions
 * @dev: drm device
 * intel_init_audio_hooks - Set up chip specific audio hooks
 * @dev_priv: device private
 */
void intel_init_audio(struct drm_device *dev)
void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_G4X(dev)) {
	if (IS_G4X(dev_priv)) {
		dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
		dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
		dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
	} else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
	} else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) {
		dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
		dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
	} else if (HAS_PCH_SPLIT(dev)) {
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
		dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
	}
@@ -29,7 +29,9 @@
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/i915_drm.h>
|
||||
#include "i915_drv.h"
|
||||
#include "intel_bios.h"
|
||||
|
||||
#define _INTEL_BIOS_PRIVATE
|
||||
#include "intel_vbt_defs.h"
|
||||
|
||||
/**
|
||||
* DOC: Video BIOS Table (VBT)
|
||||
@@ -480,7 +482,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
|
||||
child->slave_addr,
|
||||
(child->dvo_port == DEVICE_PORT_DVOB) ?
|
||||
"SDVOB" : "SDVOC");
|
||||
p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
|
||||
p_mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
|
||||
if (!p_mapping->initialized) {
|
||||
p_mapping->dvo_port = child->dvo_port;
|
||||
p_mapping->slave_addr = child->slave_addr;
|
||||
@@ -525,10 +527,7 @@ parse_driver_features(struct drm_i915_private *dev_priv,
|
||||
return;
|
||||
|
||||
if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
|
||||
dev_priv->vbt.edp_support = 1;
|
||||
|
||||
if (driver->dual_frequency)
|
||||
dev_priv->render_reclock_avail = true;
|
||||
dev_priv->vbt.edp.support = 1;
|
||||
|
||||
DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
|
||||
/*
|
||||
@@ -550,20 +549,20 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
|
||||
|
||||
edp = find_section(bdb, BDB_EDP);
|
||||
if (!edp) {
|
||||
if (dev_priv->vbt.edp_support)
|
||||
if (dev_priv->vbt.edp.support)
|
||||
DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
|
||||
case EDP_18BPP:
|
||||
dev_priv->vbt.edp_bpp = 18;
|
||||
dev_priv->vbt.edp.bpp = 18;
|
||||
break;
|
||||
case EDP_24BPP:
|
||||
dev_priv->vbt.edp_bpp = 24;
|
||||
dev_priv->vbt.edp.bpp = 24;
|
||||
break;
|
||||
case EDP_30BPP:
|
||||
dev_priv->vbt.edp_bpp = 30;
|
||||
dev_priv->vbt.edp.bpp = 30;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -571,14 +570,14 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
|
||||
edp_pps = &edp->power_seqs[panel_type];
|
||||
edp_link_params = &edp->link_params[panel_type];
|
||||
|
||||
dev_priv->vbt.edp_pps = *edp_pps;
|
||||
dev_priv->vbt.edp.pps = *edp_pps;
|
||||
|
||||
switch (edp_link_params->rate) {
|
||||
case EDP_RATE_1_62:
|
||||
dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
|
||||
dev_priv->vbt.edp.rate = DP_LINK_BW_1_62;
|
||||
break;
|
||||
case EDP_RATE_2_7:
|
||||
dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
|
||||
dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
|
||||
@@ -588,13 +587,13 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
|
||||
|
||||
switch (edp_link_params->lanes) {
|
||||
case EDP_LANE_1:
|
||||
dev_priv->vbt.edp_lanes = 1;
|
||||
dev_priv->vbt.edp.lanes = 1;
|
||||
break;
|
||||
case EDP_LANE_2:
|
||||
dev_priv->vbt.edp_lanes = 2;
|
||||
dev_priv->vbt.edp.lanes = 2;
|
||||
break;
|
||||
case EDP_LANE_4:
|
||||
dev_priv->vbt.edp_lanes = 4;
|
||||
dev_priv->vbt.edp.lanes = 4;
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
|
||||
@@ -604,16 +603,16 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
|
||||
|
||||
switch (edp_link_params->preemphasis) {
|
||||
case EDP_PREEMPHASIS_NONE:
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
|
||||
dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
|
||||
break;
|
||||
case EDP_PREEMPHASIS_3_5dB:
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
|
||||
dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
|
||||
break;
|
||||
case EDP_PREEMPHASIS_6dB:
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
|
||||
dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
|
||||
break;
|
||||
case EDP_PREEMPHASIS_9_5dB:
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
|
||||
dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
|
||||
@@ -623,16 +622,16 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
|
||||
|
||||
switch (edp_link_params->vswing) {
|
||||
case EDP_VSWING_0_4V:
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
|
||||
dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
|
||||
break;
|
||||
case EDP_VSWING_0_6V:
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
|
||||
dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
|
||||
break;
|
||||
case EDP_VSWING_0_8V:
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
|
||||
dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
|
||||
break;
|
||||
case EDP_VSWING_1_2V:
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
|
||||
dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
|
||||
@@ -645,10 +644,10 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
|
||||
|
||||
/* Don't read from VBT if module parameter has valid value*/
|
||||
if (i915.edp_vswing) {
|
||||
dev_priv->edp_low_vswing = i915.edp_vswing == 1;
|
||||
dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1;
|
||||
} else {
|
||||
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
|
||||
dev_priv->edp_low_vswing = vswing == 0;
|
||||
dev_priv->vbt.edp.low_vswing = vswing == 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -706,7 +705,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
|
||||
const struct mipi_pps_data *pps;
|
||||
|
||||
/* parse MIPI blocks only if LFP type is MIPI */
|
||||
if (!dev_priv->vbt.has_mipi)
|
||||
if (!intel_bios_is_dsi_present(dev_priv, NULL))
|
||||
return;
|
||||
|
||||
/* Initialize this to undefined indicating no generic MIPI support */
|
||||
@@ -1232,14 +1231,6 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
|
||||
continue;
|
||||
}
|
||||
|
||||
if (p_child->common.dvo_port >= DVO_PORT_MIPIA
|
||||
&& p_child->common.dvo_port <= DVO_PORT_MIPID
|
||||
&&p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT) {
|
||||
DRM_DEBUG_KMS("Found MIPI as LFP\n");
|
||||
dev_priv->vbt.has_mipi = 1;
|
||||
dev_priv->vbt.dsi.port = p_child->common.dvo_port;
|
||||
}
|
||||
|
||||
child_dev_ptr = dev_priv->vbt.child_dev + count;
|
||||
count++;
|
||||
|
||||
@@ -1431,3 +1422,166 @@ intel_bios_init(struct drm_i915_private *dev_priv)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_bios_is_tv_present - is integrated TV present in VBT
|
||||
* @dev_priv: i915 device instance
|
||||
*
|
||||
* Return true if TV is present. If no child devices were parsed from VBT,
|
||||
* assume TV is present.
|
||||
*/
|
||||
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
union child_device_config *p_child;
|
||||
int i;
|
||||
|
||||
if (!dev_priv->vbt.int_tv_support)
|
||||
return false;
|
||||
|
||||
if (!dev_priv->vbt.child_dev_num)
|
||||
return true;
|
||||
|
||||
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
|
||||
p_child = dev_priv->vbt.child_dev + i;
|
||||
/*
|
||||
* If the device type is not TV, continue.
|
||||
*/
|
||||
switch (p_child->old.device_type) {
|
||||
case DEVICE_TYPE_INT_TV:
|
||||
case DEVICE_TYPE_TV:
|
||||
case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
|
||||
break;
|
||||
default:
|
||||
continue;
|
||||
}
|
||||
/* Only when the addin_offset is non-zero, it is regarded
|
||||
* as present.
|
||||
*/
|
||||
if (p_child->old.addin_offset)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_bios_is_lvds_present - is LVDS present in VBT
|
||||
* @dev_priv: i915 device instance
|
||||
* @i2c_pin: i2c pin for LVDS if present
|
||||
*
|
||||
* Return true if LVDS is present. If no child devices were parsed from VBT,
|
||||
* assume LVDS is present.
|
||||
*/
|
||||
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!dev_priv->vbt.child_dev_num)
|
||||
return true;
|
||||
|
||||
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
|
||||
union child_device_config *uchild = dev_priv->vbt.child_dev + i;
|
||||
struct old_child_dev_config *child = &uchild->old;
|
||||
|
||||
/* If the device type is not LFP, continue.
|
||||
* We have to check both the new identifiers as well as the
|
||||
* old for compatibility with some BIOSes.
|
||||
*/
|
||||
if (child->device_type != DEVICE_TYPE_INT_LFP &&
|
||||
child->device_type != DEVICE_TYPE_LFP)
|
||||
continue;
|
||||
|
||||
if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
|
||||
*i2c_pin = child->i2c_pin;
|
||||
|
||||
/* However, we cannot trust the BIOS writers to populate
|
||||
* the VBT correctly. Since LVDS requires additional
|
||||
* information from AIM blocks, a non-zero addin offset is
|
||||
* a good indicator that the LVDS is actually present.
|
||||
*/
|
||||
if (child->addin_offset)
|
||||
return true;
|
||||
|
||||
/* But even then some BIOS writers perform some black magic
|
||||
* and instantiate the device without reference to any
|
||||
* additional data. Trust that if the VBT was written into
|
||||
* the OpRegion then they have validated the LVDS's existence.
|
||||
*/
|
||||
if (dev_priv->opregion.vbt)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_bios_is_port_edp - is the device in given port eDP
|
||||
* @dev_priv: i915 device instance
|
||||
* @port: port to check
|
||||
*
|
||||
* Return true if the device in %port is eDP.
|
||||
*/
|
||||
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
|
||||
{
|
||||
union child_device_config *p_child;
|
||||
static const short port_mapping[] = {
|
||||
[PORT_B] = DVO_PORT_DPB,
|
||||
[PORT_C] = DVO_PORT_DPC,
|
||||
[PORT_D] = DVO_PORT_DPD,
|
||||
[PORT_E] = DVO_PORT_DPE,
|
||||
};
|
||||
int i;
|
||||
|
||||
if (!dev_priv->vbt.child_dev_num)
|
||||
return false;
|
||||
|
||||
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
|
||||
p_child = dev_priv->vbt.child_dev + i;
|
||||
|
||||
if (p_child->common.dvo_port == port_mapping[port] &&
|
||||
(p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
|
||||
(DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_bios_is_dsi_present - is DSI present in VBT
|
||||
* @dev_priv: i915 device instance
|
||||
* @port: port for DSI if present
|
||||
*
|
||||
* Return true if DSI is present, and return the port in %port.
|
||||
*/
|
||||
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
|
||||
enum port *port)
|
||||
{
|
||||
union child_device_config *p_child;
|
||||
u8 dvo_port;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
|
||||
p_child = dev_priv->vbt.child_dev + i;
|
||||
|
||||
if (!(p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT))
|
||||
continue;
|
||||
|
||||
dvo_port = p_child->common.dvo_port;
|
||||
|
||||
switch (dvo_port) {
|
||||
case DVO_PORT_MIPIA:
|
||||
case DVO_PORT_MIPIC:
|
||||
if (port)
|
||||
*port = dvo_port - DVO_PORT_MIPIA;
|
||||
return true;
|
||||
case DVO_PORT_MIPIB:
|
||||
case DVO_PORT_MIPID:
|
||||
DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
|
||||
port_name(dvo_port - DVO_PORT_MIPIA));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright © 2006 Intel Corporation
|
||||
* Copyright © 2016 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
@@ -19,544 +19,17 @@
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Eric Anholt <eric@anholt.net>
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
|
||||
* the VBT from the rest of the driver. Add the parsed, clean data to struct
|
||||
* intel_vbt_data within struct drm_i915_private.
|
||||
*/
|
||||
|
||||
#ifndef _INTEL_BIOS_H_
|
||||
#define _INTEL_BIOS_H_
|
||||
|
||||
/**
|
||||
* struct vbt_header - VBT Header structure
|
||||
* @signature: VBT signature, always starts with "$VBT"
|
||||
* @version: Version of this structure
|
||||
* @header_size: Size of this structure
|
||||
* @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
|
||||
* @vbt_checksum: Checksum
|
||||
* @reserved0: Reserved
|
||||
* @bdb_offset: Offset of &struct bdb_header from beginning of VBT
|
||||
* @aim_offset: Offsets of add-in data blocks from beginning of VBT
|
||||
*/
|
||||
struct vbt_header {
|
||||
u8 signature[20];
|
||||
u16 version;
|
||||
u16 header_size;
|
||||
u16 vbt_size;
|
||||
u8 vbt_checksum;
|
||||
u8 reserved0;
|
||||
u32 bdb_offset;
|
||||
u32 aim_offset[4];
|
||||
} __packed;
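An illustrative sketch of how a parser reaches the BDB given the offsets
documented above (signature and size validation omitted; the helper mirrors
what intel_bios.c does, but is shown here only as a sketch):

static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
{
	const void *_vbt = vbt;

	/* bdb_offset is relative to the start of the VBT blob. */
	return _vbt + vbt->bdb_offset;
}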
|
||||
|
||||
/**
|
||||
* struct bdb_header - BDB Header structure
|
||||
* @signature: BDB signature "BIOS_DATA_BLOCK"
|
||||
* @version: Version of the data block definitions
|
||||
* @header_size: Size of this structure
|
||||
* @bdb_size: Size of BDB (BDB Header and data blocks)
|
||||
*/
|
||||
struct bdb_header {
|
||||
u8 signature[16];
|
||||
u16 version;
|
||||
u16 header_size;
|
||||
u16 bdb_size;
|
||||
} __packed;
|
||||
|
||||
/* strictly speaking, this is a "skip" block, but it has interesting info */
|
||||
struct vbios_data {
|
||||
u8 type; /* 0 == desktop, 1 == mobile */
|
||||
u8 relstage;
|
||||
u8 chipset;
|
||||
u8 lvds_present:1;
|
||||
u8 tv_present:1;
|
||||
u8 rsvd2:6; /* finish byte */
|
||||
u8 rsvd3[4];
|
||||
u8 signon[155];
|
||||
u8 copyright[61];
|
||||
u16 code_segment;
|
||||
u8 dos_boot_mode;
|
||||
u8 bandwidth_percent;
|
||||
u8 rsvd4; /* popup memory size */
|
||||
u8 resize_pci_bios;
|
||||
u8 rsvd5; /* is crt already on ddc2 */
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* There are several types of BIOS data blocks (BDBs), each block has
|
||||
* an ID and size in the first 3 bytes (ID in first, size in next 2).
|
||||
* Known types are listed below.
|
||||
*/
|
||||
#define BDB_GENERAL_FEATURES 1
|
||||
#define BDB_GENERAL_DEFINITIONS 2
|
||||
#define BDB_OLD_TOGGLE_LIST 3
|
||||
#define BDB_MODE_SUPPORT_LIST 4
|
||||
#define BDB_GENERIC_MODE_TABLE 5
|
||||
#define BDB_EXT_MMIO_REGS 6
|
||||
#define BDB_SWF_IO 7
|
||||
#define BDB_SWF_MMIO 8
|
||||
#define BDB_PSR 9
|
||||
#define BDB_MODE_REMOVAL_TABLE 10
|
||||
#define BDB_CHILD_DEVICE_TABLE 11
|
||||
#define BDB_DRIVER_FEATURES 12
|
||||
#define BDB_DRIVER_PERSISTENCE 13
|
||||
#define BDB_EXT_TABLE_PTRS 14
|
||||
#define BDB_DOT_CLOCK_OVERRIDE 15
|
||||
#define BDB_DISPLAY_SELECT 16
|
||||
/* 17 rsvd */
|
||||
#define BDB_DRIVER_ROTATION 18
|
||||
#define BDB_DISPLAY_REMOVE 19
|
||||
#define BDB_OEM_CUSTOM 20
|
||||
#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
|
||||
#define BDB_SDVO_LVDS_OPTIONS 22
|
||||
#define BDB_SDVO_PANEL_DTDS 23
|
||||
#define BDB_SDVO_LVDS_PNP_IDS 24
|
||||
#define BDB_SDVO_LVDS_POWER_SEQ 25
|
||||
#define BDB_TV_OPTIONS 26
|
||||
#define BDB_EDP 27
|
||||
#define BDB_LVDS_OPTIONS 40
|
||||
#define BDB_LVDS_LFP_DATA_PTRS 41
|
||||
#define BDB_LVDS_LFP_DATA 42
|
||||
#define BDB_LVDS_BACKLIGHT 43
|
||||
#define BDB_LVDS_POWER 44
|
||||
#define BDB_MIPI_CONFIG 52
|
||||
#define BDB_MIPI_SEQUENCE 53
|
||||
#define BDB_SKIP 254 /* VBIOS private block, ignore */
|
||||
|
||||
struct bdb_general_features {
|
||||
/* bits 1 */
|
||||
u8 panel_fitting:2;
|
||||
u8 flexaim:1;
|
||||
u8 msg_enable:1;
|
||||
u8 clear_screen:3;
|
||||
u8 color_flip:1;
|
||||
|
||||
/* bits 2 */
|
||||
u8 download_ext_vbt:1;
|
||||
u8 enable_ssc:1;
|
||||
u8 ssc_freq:1;
|
||||
u8 enable_lfp_on_override:1;
|
||||
u8 disable_ssc_ddt:1;
|
||||
u8 rsvd7:1;
|
||||
u8 display_clock_mode:1;
|
||||
u8 rsvd8:1; /* finish byte */
|
||||
|
||||
/* bits 3 */
|
||||
u8 disable_smooth_vision:1;
|
||||
u8 single_dvi:1;
|
||||
u8 rsvd9:1;
|
||||
u8 fdi_rx_polarity_inverted:1;
|
||||
u8 rsvd10:4; /* finish byte */
|
||||
|
||||
/* bits 4 */
|
||||
u8 legacy_monitor_detect;
|
||||
|
||||
/* bits 5 */
|
||||
u8 int_crt_support:1;
|
||||
u8 int_tv_support:1;
|
||||
u8 int_efp_support:1;
|
||||
u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
|
||||
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
|
||||
u8 rsvd11:3; /* finish byte */
|
||||
} __packed;
|
||||
|
||||
/* pre-915 */
|
||||
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
|
||||
#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
|
||||
#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
|
||||
#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
|
||||
|
||||
/* Pre 915 */
|
||||
#define DEVICE_TYPE_NONE 0x00
|
||||
#define DEVICE_TYPE_CRT 0x01
|
||||
#define DEVICE_TYPE_TV 0x09
|
||||
#define DEVICE_TYPE_EFP 0x12
|
||||
#define DEVICE_TYPE_LFP 0x22
|
||||
/* On 915+ */
|
||||
#define DEVICE_TYPE_CRT_DPMS 0x6001
|
||||
#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
|
||||
#define DEVICE_TYPE_TV_COMPOSITE 0x0209
|
||||
#define DEVICE_TYPE_TV_MACROVISION 0x0289
|
||||
#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
|
||||
#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
|
||||
#define DEVICE_TYPE_TV_SCART 0x0209
|
||||
#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
|
||||
#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
|
||||
#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
|
||||
#define DEVICE_TYPE_EFP_DVI_I 0x6053
|
||||
#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
|
||||
#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
|
||||
#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
|
||||
#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
|
||||
#define DEVICE_TYPE_LFP_PANELLINK 0x5012
|
||||
#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
|
||||
#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
|
||||
#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
|
||||
#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
|
||||
|
||||
#define DEVICE_CFG_NONE 0x00
|
||||
#define DEVICE_CFG_12BIT_DVOB 0x01
|
||||
#define DEVICE_CFG_12BIT_DVOC 0x02
|
||||
#define DEVICE_CFG_24BIT_DVOBC 0x09
|
||||
#define DEVICE_CFG_24BIT_DVOCB 0x0a
|
||||
#define DEVICE_CFG_DUAL_DVOB 0x11
|
||||
#define DEVICE_CFG_DUAL_DVOC 0x12
|
||||
#define DEVICE_CFG_DUAL_DVOBC 0x13
|
||||
#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
|
||||
#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
|
||||
|
||||
#define DEVICE_WIRE_NONE 0x00
|
||||
#define DEVICE_WIRE_DVOB 0x01
|
||||
#define DEVICE_WIRE_DVOC 0x02
|
||||
#define DEVICE_WIRE_DVOBC 0x03
|
||||
#define DEVICE_WIRE_DVOBB 0x05
|
||||
#define DEVICE_WIRE_DVOCC 0x06
|
||||
#define DEVICE_WIRE_DVOB_MASTER 0x0d
|
||||
#define DEVICE_WIRE_DVOC_MASTER 0x0e
|
||||
|
||||
#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
|
||||
#define DEVICE_PORT_DVOB 0x01
|
||||
#define DEVICE_PORT_DVOC 0x02
|
||||
|
||||
/*
|
||||
* We used to keep this struct but without any version control. We should avoid
|
||||
* using it in the future, but it should be safe to keep using it in the old
|
||||
* code. Do not change; we rely on its size.
|
||||
*/
|
||||
struct old_child_dev_config {
|
||||
u16 handle;
|
||||
u16 device_type;
|
||||
u8 device_id[10]; /* ascii string */
|
||||
u16 addin_offset;
|
||||
u8 dvo_port; /* See Device_PORT_* above */
|
||||
u8 i2c_pin;
|
||||
u8 slave_addr;
|
||||
u8 ddc_pin;
|
||||
u16 edid_ptr;
|
||||
u8 dvo_cfg; /* See DEVICE_CFG_* above */
|
||||
u8 dvo2_port;
|
||||
u8 i2c2_pin;
|
||||
u8 slave2_addr;
|
||||
u8 ddc2_pin;
|
||||
u8 capabilities;
|
||||
u8 dvo_wiring;/* See DEVICE_WIRE_* above */
|
||||
u8 dvo2_wiring;
|
||||
u16 extended_type;
|
||||
u8 dvo_function;
|
||||
} __packed;
|
||||
|
||||
/* This one contains field offsets that are known to be common for all BDB
 * versions. Notice that the meaning of the contents may still change,
 * but at least the offsets are consistent. */
|
||||
|
||||
/* Definitions for flags_1 */
|
||||
#define IBOOST_ENABLE (1<<3)
|
||||
|
||||
struct common_child_dev_config {
|
||||
u16 handle;
|
||||
u16 device_type;
|
||||
u8 not_common1[12];
|
||||
u8 dvo_port;
|
||||
u8 not_common2[2];
|
||||
u8 ddc_pin;
|
||||
u16 edid_ptr;
|
||||
u8 obsolete;
|
||||
u8 flags_1;
|
||||
u8 not_common3[13];
|
||||
u8 iboost_level;
|
||||
} __packed;
|
||||
|
||||
|
||||
/* This field changes depending on the BDB version, so the most reliable way to
|
||||
* read it is by checking the BDB version and reading the raw pointer. */
|
||||
union child_device_config {
|
||||
/* This one is safe to be used anywhere, but the code should still check
|
||||
* the BDB version. */
|
||||
u8 raw[33];
|
||||
/* This one should only be kept for legacy code. */
|
||||
struct old_child_dev_config old;
|
||||
/* This one should also be safe to use anywhere, even without version
|
||||
* checks. */
|
||||
struct common_child_dev_config common;
|
||||
} __packed;
|
||||
|
||||
struct bdb_general_definitions {
|
||||
/* DDC GPIO */
|
||||
u8 crt_ddc_gmbus_pin;
|
||||
|
||||
/* DPMS bits */
|
||||
u8 dpms_acpi:1;
|
||||
u8 skip_boot_crt_detect:1;
|
||||
u8 dpms_aim:1;
|
||||
u8 rsvd1:5; /* finish byte */
|
||||
|
||||
/* boot device bits */
|
||||
u8 boot_display[2];
|
||||
u8 child_dev_size;
|
||||
|
||||
/*
|
||||
* Device info:
|
||||
* If TV is present, it'll be at devices[0].
|
||||
* LVDS will be next, either devices[0] or [1], if present.
|
||||
 * On some platforms the number of devices is 6, but it could be as few
 * as 4 if both TV and LVDS are missing.
 * The device count is related to the size of the general definitions
 * block; it is obtained with the following formula:
 * number = (block_size - sizeof(bdb_general_definitions)) /
 *          defs->child_dev_size;
|
||||
*/
|
||||
uint8_t devices[0];
|
||||
} __packed;
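A hypothetical helper showing the child device count formula from the comment
above ("defs" points at a parsed general definitions block and "block_size" is
the size of its payload; both names are illustrative, not from this patch):

static int bdb_child_device_num(const struct bdb_general_definitions *defs,
				size_t block_size)
{
	return (block_size - sizeof(*defs)) / defs->child_dev_size;
}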
|
||||
|
||||
/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
|
||||
#define MODE_MASK 0x3
|
||||
|
||||
struct bdb_lvds_options {
|
||||
u8 panel_type;
|
||||
u8 rsvd1;
|
||||
/* LVDS capabilities, stored in a dword */
|
||||
u8 pfit_mode:2;
|
||||
u8 pfit_text_mode_enhanced:1;
|
||||
u8 pfit_gfx_mode_enhanced:1;
|
||||
u8 pfit_ratio_auto:1;
|
||||
u8 pixel_dither:1;
|
||||
u8 lvds_edid:1;
|
||||
u8 rsvd2:1;
|
||||
u8 rsvd4;
|
||||
/* LVDS Panel channel bits stored here */
|
||||
u32 lvds_panel_channel_bits;
|
||||
/* LVDS SSC (Spread Spectrum Clock) bits stored here. */
|
||||
u16 ssc_bits;
|
||||
u16 ssc_freq;
|
||||
u16 ssc_ddt;
|
||||
/* Panel color depth defined here */
|
||||
u16 panel_color_depth;
|
||||
/* LVDS panel type bits stored here */
|
||||
u32 dps_panel_type_bits;
|
||||
/* LVDS backlight control type bits stored here */
|
||||
u32 blt_control_type_bits;
|
||||
} __packed;
|
||||
|
||||
/* LFP pointer table contains entries to the struct below */
|
||||
struct bdb_lvds_lfp_data_ptr {
|
||||
u16 fp_timing_offset; /* offsets are from start of bdb */
|
||||
u8 fp_table_size;
|
||||
u16 dvo_timing_offset;
|
||||
u8 dvo_table_size;
|
||||
u16 panel_pnp_id_offset;
|
||||
u8 pnp_table_size;
|
||||
} __packed;
|
||||
|
||||
struct bdb_lvds_lfp_data_ptrs {
|
||||
u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
|
||||
struct bdb_lvds_lfp_data_ptr ptr[16];
|
||||
} __packed;
|
||||
|
||||
/* LFP data has 3 blocks per entry */
|
||||
struct lvds_fp_timing {
|
||||
u16 x_res;
|
||||
u16 y_res;
|
||||
u32 lvds_reg;
|
||||
u32 lvds_reg_val;
|
||||
u32 pp_on_reg;
|
||||
u32 pp_on_reg_val;
|
||||
u32 pp_off_reg;
|
||||
u32 pp_off_reg_val;
|
||||
u32 pp_cycle_reg;
|
||||
u32 pp_cycle_reg_val;
|
||||
u32 pfit_reg;
|
||||
u32 pfit_reg_val;
|
||||
u16 terminator;
|
||||
} __packed;
|
||||
|
||||
struct lvds_dvo_timing {
|
||||
u16 clock; /**< In 10khz */
|
||||
u8 hactive_lo;
|
||||
u8 hblank_lo;
|
||||
u8 hblank_hi:4;
|
||||
u8 hactive_hi:4;
|
||||
u8 vactive_lo;
|
||||
u8 vblank_lo;
|
||||
u8 vblank_hi:4;
|
||||
u8 vactive_hi:4;
|
||||
u8 hsync_off_lo;
|
||||
u8 hsync_pulse_width;
|
||||
u8 vsync_pulse_width:4;
|
||||
u8 vsync_off:4;
|
||||
u8 rsvd0:6;
|
||||
u8 hsync_off_hi:2;
|
||||
u8 h_image;
|
||||
u8 v_image;
|
||||
u8 max_hv;
|
||||
u8 h_border;
|
||||
u8 v_border;
|
||||
u8 rsvd1:3;
|
||||
u8 digital:2;
|
||||
u8 vsync_positive:1;
|
||||
u8 hsync_positive:1;
|
||||
u8 rsvd2:1;
|
||||
} __packed;
|
||||
|
||||
struct lvds_pnp_id {
|
||||
u16 mfg_name;
|
||||
u16 product_code;
|
||||
u32 serial;
|
||||
u8 mfg_week;
|
||||
u8 mfg_year;
|
||||
} __packed;
|
||||
|
||||
struct bdb_lvds_lfp_data_entry {
|
||||
struct lvds_fp_timing fp_timing;
|
||||
struct lvds_dvo_timing dvo_timing;
|
||||
struct lvds_pnp_id pnp_id;
|
||||
} __packed;
|
||||
|
||||
struct bdb_lvds_lfp_data {
|
||||
struct bdb_lvds_lfp_data_entry data[16];
|
||||
} __packed;
|
||||
|
||||
#define BDB_BACKLIGHT_TYPE_NONE 0
|
||||
#define BDB_BACKLIGHT_TYPE_PWM 2
|
||||
|
||||
struct bdb_lfp_backlight_data_entry {
|
||||
u8 type:2;
|
||||
u8 active_low_pwm:1;
|
||||
u8 obsolete1:5;
|
||||
u16 pwm_freq_hz;
|
||||
u8 min_brightness;
|
||||
u8 obsolete2;
|
||||
u8 obsolete3;
|
||||
} __packed;
|
||||
|
||||
struct bdb_lfp_backlight_data {
|
||||
u8 entry_size;
|
||||
struct bdb_lfp_backlight_data_entry data[16];
|
||||
u8 level[16];
|
||||
} __packed;
|
||||
|
||||
struct aimdb_header {
|
||||
char signature[16];
|
||||
char oem_device[20];
|
||||
u16 aimdb_version;
|
||||
u16 aimdb_header_size;
|
||||
u16 aimdb_size;
|
||||
} __packed;
|
||||
|
||||
struct aimdb_block {
|
||||
u8 aimdb_id;
|
||||
u16 aimdb_size;
|
||||
} __packed;
|
||||
|
||||
struct vch_panel_data {
|
||||
u16 fp_timing_offset;
|
||||
u8 fp_timing_size;
|
||||
u16 dvo_timing_offset;
|
||||
u8 dvo_timing_size;
|
||||
u16 text_fitting_offset;
|
||||
u8 text_fitting_size;
|
||||
u16 graphics_fitting_offset;
|
||||
u8 graphics_fitting_size;
|
||||
} __packed;
|
||||
|
||||
struct vch_bdb_22 {
|
||||
struct aimdb_block aimdb_block;
|
||||
struct vch_panel_data panels[16];
|
||||
} __packed;
|
||||
|
||||
struct bdb_sdvo_lvds_options {
|
||||
u8 panel_backlight;
|
||||
u8 h40_set_panel_type;
|
||||
u8 panel_type;
|
||||
u8 ssc_clk_freq;
|
||||
u16 als_low_trip;
|
||||
u16 als_high_trip;
|
||||
u8 sclalarcoeff_tab_row_num;
|
||||
u8 sclalarcoeff_tab_row_size;
|
||||
u8 coefficient[8];
|
||||
u8 panel_misc_bits_1;
|
||||
u8 panel_misc_bits_2;
|
||||
u8 panel_misc_bits_3;
|
||||
u8 panel_misc_bits_4;
|
||||
} __packed;
|
||||
|
||||
|
||||
#define BDB_DRIVER_FEATURE_NO_LVDS 0
|
||||
#define BDB_DRIVER_FEATURE_INT_LVDS 1
|
||||
#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
|
||||
#define BDB_DRIVER_FEATURE_EDP 3
|
||||
|
||||
struct bdb_driver_features {
|
||||
u8 boot_dev_algorithm:1;
|
||||
u8 block_display_switch:1;
|
||||
u8 allow_display_switch:1;
|
||||
u8 hotplug_dvo:1;
|
||||
u8 dual_view_zoom:1;
|
||||
u8 int15h_hook:1;
|
||||
u8 sprite_in_clone:1;
|
||||
u8 primary_lfp_id:1;
|
||||
|
||||
u16 boot_mode_x;
|
||||
u16 boot_mode_y;
|
||||
u8 boot_mode_bpp;
|
||||
u8 boot_mode_refresh;
|
||||
|
||||
u16 enable_lfp_primary:1;
|
||||
u16 selective_mode_pruning:1;
|
||||
u16 dual_frequency:1;
|
||||
u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
|
||||
u16 nt_clone_support:1;
|
||||
u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
|
||||
u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
|
||||
u16 cui_aspect_scaling:1;
|
||||
u16 preserve_aspect_ratio:1;
|
||||
u16 sdvo_device_power_down:1;
|
||||
u16 crt_hotplug:1;
|
||||
u16 lvds_config:2;
|
||||
u16 tv_hotplug:1;
|
||||
u16 hdmi_config:2;
|
||||
|
||||
u8 static_display:1;
|
||||
u8 reserved2:7;
|
||||
u16 legacy_crt_max_x;
|
||||
u16 legacy_crt_max_y;
|
||||
u8 legacy_crt_max_refresh;
|
||||
|
||||
u8 hdmi_termination;
|
||||
u8 custom_vbt_version;
|
||||
/* Driver features data block */
|
||||
u16 rmpm_enabled:1;
|
||||
u16 s2ddt_enabled:1;
|
||||
u16 dpst_enabled:1;
|
||||
u16 bltclt_enabled:1;
|
||||
u16 adb_enabled:1;
|
||||
u16 drrs_enabled:1;
|
||||
u16 grs_enabled:1;
|
||||
u16 gpmt_enabled:1;
|
||||
u16 tbt_enabled:1;
|
||||
u16 psr_enabled:1;
|
||||
u16 ips_enabled:1;
|
||||
u16 reserved3:4;
|
||||
u16 pc_feature_valid:1;
|
||||
} __packed;
|
||||
|
||||
#define EDP_18BPP 0
|
||||
#define EDP_24BPP 1
|
||||
#define EDP_30BPP 2
|
||||
#define EDP_RATE_1_62 0
|
||||
#define EDP_RATE_2_7 1
|
||||
#define EDP_LANE_1 0
|
||||
#define EDP_LANE_2 1
|
||||
#define EDP_LANE_4 3
|
||||
#define EDP_PREEMPHASIS_NONE 0
|
||||
#define EDP_PREEMPHASIS_3_5dB 1
|
||||
#define EDP_PREEMPHASIS_6dB 2
|
||||
#define EDP_PREEMPHASIS_9_5dB 3
|
||||
#define EDP_VSWING_0_4V 0
|
||||
#define EDP_VSWING_0_6V 1
|
||||
#define EDP_VSWING_0_8V 2
|
||||
#define EDP_VSWING_1_2V 3
|
||||
|
||||
struct edp_power_seq {
|
||||
u16 t1_t3;
|
||||
u16 t8;
|
||||
@@ -565,245 +38,37 @@ struct edp_power_seq
|
||||
u16 t11_t12;
|
||||
} __packed;
|
||||
|
||||
struct edp_link_params {
|
||||
u8 rate:4;
|
||||
u8 lanes:4;
|
||||
u8 preemphasis:4;
|
||||
u8 vswing:4;
|
||||
} __packed;
|
||||
/* MIPI Sequence Block definitions */
|
||||
enum mipi_seq {
|
||||
MIPI_SEQ_END = 0,
|
||||
MIPI_SEQ_ASSERT_RESET,
|
||||
MIPI_SEQ_INIT_OTP,
|
||||
MIPI_SEQ_DISPLAY_ON,
|
||||
MIPI_SEQ_DISPLAY_OFF,
|
||||
MIPI_SEQ_DEASSERT_RESET,
|
||||
MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
|
||||
MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
|
||||
MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
|
||||
MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
|
||||
MIPI_SEQ_POWER_ON, /* sequence block v3+ */
|
||||
MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
|
||||
MIPI_SEQ_MAX
|
||||
};
|
||||
|
||||
struct bdb_edp {
|
||||
struct edp_power_seq power_seqs[16];
|
||||
u32 color_depth;
|
||||
struct edp_link_params link_params[16];
|
||||
u32 sdrrs_msa_timing_delay;
|
||||
|
||||
/* ith bit indicates enabled/disabled for (i+1)th panel */
|
||||
u16 edp_s3d_feature;
|
||||
u16 edp_t3_optimization;
|
||||
u64 edp_vswing_preemph; /* v173 */
|
||||
} __packed;
|
||||
|
||||
struct psr_table {
|
||||
/* Feature bits */
|
||||
u8 full_link:1;
|
||||
u8 require_aux_to_wakeup:1;
|
||||
u8 feature_bits_rsvd:6;
|
||||
|
||||
/* Wait times */
|
||||
u8 idle_frames:4;
|
||||
u8 lines_to_wait:3;
|
||||
u8 wait_times_rsvd:1;
|
||||
|
||||
/* TP wake up time in multiple of 100 */
|
||||
u16 tp1_wakeup_time;
|
||||
u16 tp2_tp3_wakeup_time;
|
||||
} __packed;
|
||||
|
||||
struct bdb_psr {
|
||||
struct psr_table psr_table[16];
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* Driver<->VBIOS interaction occurs through scratch bits in
|
||||
* GR18 & SWF*.
|
||||
*/
|
||||
|
||||
/* GR18 bits are set on display switch and hotkey events */
|
||||
#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
|
||||
#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
|
||||
#define GR18_HK_NONE (0x0<<3)
|
||||
#define GR18_HK_LFP_STRETCH (0x1<<3)
|
||||
#define GR18_HK_TOGGLE_DISP (0x2<<3)
|
||||
#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
|
||||
#define GR18_HK_POPUP_DISABLED (0x6<<3)
|
||||
#define GR18_HK_POPUP_ENABLED (0x7<<3)
|
||||
#define GR18_HK_PFIT (0x8<<3)
|
||||
#define GR18_HK_APM_CHANGE (0xa<<3)
|
||||
#define GR18_HK_MULTIPLE (0xc<<3)
|
||||
#define GR18_USER_INT_EN (1<<2)
|
||||
#define GR18_A0000_FLUSH_EN (1<<1)
|
||||
#define GR18_SMM_EN (1<<0)
|
||||
|
||||
/* Set by driver, cleared by VBIOS */
|
||||
#define SWF00_YRES_SHIFT 16
|
||||
#define SWF00_XRES_SHIFT 0
|
||||
#define SWF00_RES_MASK 0xffff
|
||||
|
||||
/* Set by VBIOS at boot time and driver at runtime */
|
||||
#define SWF01_TV2_FORMAT_SHIFT 8
|
||||
#define SWF01_TV1_FORMAT_SHIFT 0
|
||||
#define SWF01_TV_FORMAT_MASK 0xffff
|
||||
|
||||
#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
|
||||
#define SWF10_GTT_OVERRIDE_EN (1<<28)
|
||||
#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
|
||||
#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
|
||||
#define SWF10_OLD_TOGGLE 0x0
|
||||
#define SWF10_TOGGLE_LIST_1 0x1
|
||||
#define SWF10_TOGGLE_LIST_2 0x2
|
||||
#define SWF10_TOGGLE_LIST_3 0x3
|
||||
#define SWF10_TOGGLE_LIST_4 0x4
|
||||
#define SWF10_PANNING_EN (1<<23)
|
||||
#define SWF10_DRIVER_LOADED (1<<22)
|
||||
#define SWF10_EXTENDED_DESKTOP (1<<21)
|
||||
#define SWF10_EXCLUSIVE_MODE (1<<20)
|
||||
#define SWF10_OVERLAY_EN (1<<19)
|
||||
#define SWF10_PLANEB_HOLDOFF (1<<18)
|
||||
#define SWF10_PLANEA_HOLDOFF (1<<17)
|
||||
#define SWF10_VGA_HOLDOFF (1<<16)
|
||||
#define SWF10_ACTIVE_DISP_MASK 0xffff
|
||||
#define SWF10_PIPEB_LFP2 (1<<15)
|
||||
#define SWF10_PIPEB_EFP2 (1<<14)
|
||||
#define SWF10_PIPEB_TV2 (1<<13)
|
||||
#define SWF10_PIPEB_CRT2 (1<<12)
|
||||
#define SWF10_PIPEB_LFP (1<<11)
|
||||
#define SWF10_PIPEB_EFP (1<<10)
|
||||
#define SWF10_PIPEB_TV (1<<9)
|
||||
#define SWF10_PIPEB_CRT (1<<8)
|
||||
#define SWF10_PIPEA_LFP2 (1<<7)
|
||||
#define SWF10_PIPEA_EFP2 (1<<6)
|
||||
#define SWF10_PIPEA_TV2 (1<<5)
|
||||
#define SWF10_PIPEA_CRT2 (1<<4)
|
||||
#define SWF10_PIPEA_LFP (1<<3)
|
||||
#define SWF10_PIPEA_EFP (1<<2)
|
||||
#define SWF10_PIPEA_TV (1<<1)
|
||||
#define SWF10_PIPEA_CRT (1<<0)
|
||||
|
||||
#define SWF11_MEMORY_SIZE_SHIFT 16
|
||||
#define SWF11_SV_TEST_EN (1<<15)
|
||||
#define SWF11_IS_AGP (1<<14)
|
||||
#define SWF11_DISPLAY_HOLDOFF (1<<13)
|
||||
#define SWF11_DPMS_REDUCED (1<<12)
|
||||
#define SWF11_IS_VBE_MODE (1<<11)
|
||||
#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
|
||||
#define SWF11_DPMS_MASK 0x07
|
||||
#define SWF11_DPMS_OFF (1<<2)
|
||||
#define SWF11_DPMS_SUSPEND (1<<1)
|
||||
#define SWF11_DPMS_STANDBY (1<<0)
|
||||
#define SWF11_DPMS_ON 0
|
||||
|
||||
#define SWF14_GFX_PFIT_EN (1<<31)
|
||||
#define SWF14_TEXT_PFIT_EN (1<<30)
|
||||
#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
|
||||
#define SWF14_POPUP_EN (1<<28)
|
||||
#define SWF14_DISPLAY_HOLDOFF (1<<27)
|
||||
#define SWF14_DISP_DETECT_EN (1<<26)
|
||||
#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
|
||||
#define SWF14_DRIVER_STATUS (1<<24)
|
||||
#define SWF14_OS_TYPE_WIN9X (1<<23)
|
||||
#define SWF14_OS_TYPE_WINNT (1<<22)
|
||||
/* 21:19 rsvd */
|
||||
#define SWF14_PM_TYPE_MASK 0x00070000
|
||||
#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
|
||||
#define SWF14_PM_ACPI (0x3 << 16)
|
||||
#define SWF14_PM_APM_12 (0x2 << 16)
|
||||
#define SWF14_PM_APM_11 (0x1 << 16)
|
||||
#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
|
||||
/* if GR18 indicates a display switch */
|
||||
#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
|
||||
#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
|
||||
#define SWF14_DS_PIPEB_TV2_EN (1<<13)
|
||||
#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
|
||||
#define SWF14_DS_PIPEB_LFP_EN (1<<11)
|
||||
#define SWF14_DS_PIPEB_EFP_EN (1<<10)
|
||||
#define SWF14_DS_PIPEB_TV_EN (1<<9)
|
||||
#define SWF14_DS_PIPEB_CRT_EN (1<<8)
|
||||
#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
|
||||
#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
|
||||
#define SWF14_DS_PIPEA_TV2_EN (1<<5)
|
||||
#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
|
||||
#define SWF14_DS_PIPEA_LFP_EN (1<<3)
|
||||
#define SWF14_DS_PIPEA_EFP_EN (1<<2)
|
||||
#define SWF14_DS_PIPEA_TV_EN (1<<1)
|
||||
#define SWF14_DS_PIPEA_CRT_EN (1<<0)
|
||||
/* if GR18 indicates a panel fitting request */
|
||||
#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
|
||||
/* if GR18 indicates an APM change request */
|
||||
#define SWF14_APM_HIBERNATE 0x4
|
||||
#define SWF14_APM_SUSPEND 0x3
|
||||
#define SWF14_APM_STANDBY 0x1
|
||||
#define SWF14_APM_RESTORE 0x0
|
||||
|
||||
/* Add the device class for LFP, TV, HDMI */
|
||||
#define DEVICE_TYPE_INT_LFP 0x1022
|
||||
#define DEVICE_TYPE_INT_TV 0x1009
|
||||
#define DEVICE_TYPE_HDMI 0x60D2
|
||||
#define DEVICE_TYPE_DP 0x68C6
|
||||
#define DEVICE_TYPE_eDP 0x78C6
|
||||
|
||||
#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
|
||||
#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
|
||||
#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
|
||||
#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
|
||||
#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
|
||||
#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
|
||||
#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
|
||||
#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
|
||||
#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
|
||||
#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
|
||||
#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
|
||||
#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
|
||||
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
|
||||
#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
|
||||
#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
|
||||
|
||||
/*
|
||||
* Bits we care about when checking for DEVICE_TYPE_eDP
|
||||
* Depending on the system, the other bits may or may not
|
||||
* be set for eDP outputs.
|
||||
*/
|
||||
#define DEVICE_TYPE_eDP_BITS \
|
||||
(DEVICE_TYPE_INTERNAL_CONNECTOR | \
|
||||
DEVICE_TYPE_MIPI_OUTPUT | \
|
||||
DEVICE_TYPE_COMPOSITE_OUTPUT | \
|
||||
DEVICE_TYPE_DUAL_CHANNEL | \
|
||||
DEVICE_TYPE_LVDS_SINGALING | \
|
||||
DEVICE_TYPE_TMDS_DVI_SIGNALING | \
|
||||
DEVICE_TYPE_VIDEO_SIGNALING | \
|
||||
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
|
||||
DEVICE_TYPE_ANALOG_OUTPUT)
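In other words, a child device is treated as eDP when (device_type &
DEVICE_TYPE_eDP_BITS) == (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS), which is
exactly the comparison intel_bios_is_port_edp() performs above.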
|
||||
|
||||
/* define the DVO port for HDMI output type */
|
||||
#define DVO_B 1
|
||||
#define DVO_C 2
|
||||
#define DVO_D 3
|
||||
|
||||
/* Possible values for the "DVO Port" field for versions >= 155: */
|
||||
#define DVO_PORT_HDMIA 0
|
||||
#define DVO_PORT_HDMIB 1
|
||||
#define DVO_PORT_HDMIC 2
|
||||
#define DVO_PORT_HDMID 3
|
||||
#define DVO_PORT_LVDS 4
|
||||
#define DVO_PORT_TV 5
|
||||
#define DVO_PORT_CRT 6
|
||||
#define DVO_PORT_DPB 7
|
||||
#define DVO_PORT_DPC 8
|
||||
#define DVO_PORT_DPD 9
|
||||
#define DVO_PORT_DPA 10
|
||||
#define DVO_PORT_DPE 11
|
||||
#define DVO_PORT_HDMIE 12
|
||||
#define DVO_PORT_MIPIA 21
|
||||
#define DVO_PORT_MIPIB 22
|
||||
#define DVO_PORT_MIPIC 23
|
||||
#define DVO_PORT_MIPID 24
|
||||
|
||||
/* Block 52 contains MIPI Panel info.
 * There are 6 such entries; the index into the correct
 * entry is based on the panel_index in block #40 (LFP options).
|
||||
*/
|
||||
#define MAX_MIPI_CONFIGURATIONS 6
|
||||
enum mipi_seq_element {
|
||||
MIPI_SEQ_ELEM_END = 0,
|
||||
MIPI_SEQ_ELEM_SEND_PKT,
|
||||
MIPI_SEQ_ELEM_DELAY,
|
||||
MIPI_SEQ_ELEM_GPIO,
|
||||
MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
|
||||
MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
|
||||
MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
|
||||
MIPI_SEQ_ELEM_MAX
|
||||
};
|
||||
|
||||
#define MIPI_DSI_UNDEFINED_PANEL_ID 0
|
||||
#define MIPI_DSI_GENERIC_PANEL_ID 1
|
||||
|
||||
/*
|
||||
* PMIC vs SoC Backlight support specified in pwm_blc
|
||||
* field in mipi_config block below.
|
||||
*/
|
||||
#define PPS_BLC_PMIC 0
|
||||
#define PPS_BLC_SOC 1
|
||||
|
||||
struct mipi_config {
|
||||
u16 panel_id;
|
||||
|
||||
@@ -821,6 +86,8 @@ struct mipi_config
|
||||
u32 video_transfer_mode:2;
|
||||
|
||||
u32 cabc_supported:1;
|
||||
#define PPS_BLC_PMIC 0
|
||||
#define PPS_BLC_SOC 1
|
||||
u32 pwm_blc:1;
|
||||
|
||||
/* Bit 13:10 */
|
||||
@@ -924,12 +191,7 @@ struct mipi_config
|
||||
|
||||
} __packed;
|
||||
|
||||
/* Block 52 contains MIPI configuration block
|
||||
* 6 * bdb_mipi_config, followed by 6 pps data
|
||||
* block below
|
||||
*
|
||||
* all delays has a unit of 100us
|
||||
*/
|
||||
/* all delays have a unit of 100us */
|
||||
struct mipi_pps_data {
|
||||
u16 panel_on_delay;
|
||||
u16 bl_enable_delay;
|
||||
@@ -938,57 +200,4 @@ struct mipi_pps_data
|
||||
u16 panel_power_cycle_delay;
|
||||
} __packed;
|
||||
|
||||
struct bdb_mipi_config {
|
||||
struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
|
||||
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
|
||||
} __packed;
|
||||
|
||||
/* Block 53 contains MIPI sequences as needed by the panel
|
||||
* for enabling it. This block can be variable in size and
|
||||
* can be maximum of 6 blocks
|
||||
*/
|
||||
struct bdb_mipi_sequence {
|
||||
u8 version;
|
||||
u8 data[0];
|
||||
} __packed;
|
||||
|
||||
/* MIPI Sequence Block definitions */
|
||||
enum mipi_seq {
|
||||
MIPI_SEQ_END = 0,
|
||||
MIPI_SEQ_ASSERT_RESET,
|
||||
MIPI_SEQ_INIT_OTP,
|
||||
MIPI_SEQ_DISPLAY_ON,
|
||||
MIPI_SEQ_DISPLAY_OFF,
|
||||
MIPI_SEQ_DEASSERT_RESET,
|
||||
MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
|
||||
MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
|
||||
MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
|
||||
MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
|
||||
MIPI_SEQ_POWER_ON, /* sequence block v3+ */
|
||||
MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
|
||||
MIPI_SEQ_MAX
|
||||
};
|
||||
|
||||
enum mipi_seq_element {
|
||||
MIPI_SEQ_ELEM_END = 0,
|
||||
MIPI_SEQ_ELEM_SEND_PKT,
|
||||
MIPI_SEQ_ELEM_DELAY,
|
||||
MIPI_SEQ_ELEM_GPIO,
|
||||
MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
|
||||
MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
|
||||
MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
|
||||
MIPI_SEQ_ELEM_MAX
|
||||
};
|
||||
|
||||
enum mipi_gpio_pin_index {
|
||||
MIPI_GPIO_UNDEFINED = 0,
|
||||
MIPI_GPIO_PANEL_ENABLE,
|
||||
MIPI_GPIO_BL_ENABLE,
|
||||
MIPI_GPIO_PWM_ENABLE,
|
||||
MIPI_GPIO_RESET_N,
|
||||
MIPI_GPIO_PWR_DOWN_R,
|
||||
MIPI_GPIO_STDBY_RST_N,
|
||||
MIPI_GPIO_MAX
|
||||
};
|
||||
|
||||
#endif /* _INTEL_BIOS_H_ */
|
||||
|
556
drivers/gpu/drm/i915/intel_color.c
Normal file
@@ -0,0 +1,556 @@
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 */

#include "intel_drv.h"

#define CTM_COEFF_SIGN	(1ULL << 63)

#define CTM_COEFF_1_0	(1ULL << 32)
#define CTM_COEFF_2_0	(CTM_COEFF_1_0 << 1)
#define CTM_COEFF_4_0	(CTM_COEFF_2_0 << 1)
#define CTM_COEFF_8_0	(CTM_COEFF_4_0 << 1)
#define CTM_COEFF_0_5	(CTM_COEFF_1_0 >> 1)
#define CTM_COEFF_0_25	(CTM_COEFF_0_5 >> 1)
#define CTM_COEFF_0_125	(CTM_COEFF_0_25 >> 1)

#define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255)

#define CTM_COEFF_NEGATIVE(coeff)	(((coeff) & CTM_COEFF_SIGN) != 0)
#define CTM_COEFF_ABS(coeff)		((coeff) & (CTM_COEFF_SIGN - 1))

#define LEGACY_LUT_LENGTH		(sizeof(struct drm_color_lut) * 256)

/*
 * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
 * format). This macro takes the coefficient we want transformed and the
 * number of fractional bits.
 *
 * We only have a 9 bits precision window which slides depending on the value
 * of the CTM coefficient and we write the value from bit 3. We also round the
 * value.
 */
#define I9XX_CSC_COEFF_FP(coeff, fbits)	\
	(clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)

#define I9XX_CSC_COEFF_LIMITED_RANGE	\
	I9XX_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
#define I9XX_CSC_COEFF_1_0		\
	((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
|
||||
|
||||
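For a concrete feel of the 9-bit sliding window above, here is a standalone sketch (an editor's illustration, not part of the patch) that reproduces the I9XX_CSC_COEFF_FP() arithmetic in userspace; clamp_u64() stands in for the kernel's clamp_val():

#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_u64(uint64_t v, uint64_t lo, uint64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Keep 9 significant bits starting at bit 3 of a U32.32 coefficient,
 * with rounding, exactly as I9XX_CSC_COEFF_FP() does. */
static uint16_t csc_coeff_fp(uint64_t coeff, int fbits)
{
	return clamp_u64((coeff >> (32 - fbits - 3)) + 4, 0, 0xfff) & 0xff8;
}

int main(void)
{
	/* 0.5 (U32.32: 1ULL << 31) sits in the [0.5, 1.0) window, which the
	 * driver converts with 9 fractional bits; this prints 0x800. */
	printf("0x%03x\n", (unsigned)csc_coeff_fp(1ULL << 31, 9));
	return 0;
}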
static bool crtc_state_is_legacy(struct drm_crtc_state *state)
{
	return !state->degamma_lut &&
		!state->ctm &&
		state->gamma_lut &&
		state->gamma_lut->length == LEGACY_LUT_LENGTH;
}

/*
 * When using limited range, multiply the matrix given by userspace by
 * the matrix that we would use for the limited range. We do the
 * multiplication in U2.30 format.
 */
static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
{
	int i;

	for (i = 0; i < 9; i++)
		result[i] = 0;

	for (i = 0; i < 3; i++) {
		int64_t user_coeff = input[i * 3 + i];
		uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2;
		uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff),
					       0,
					       CTM_COEFF_4_0 - 1) >> 2;

		result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27;
		if (CTM_COEFF_NEGATIVE(user_coeff))
			result[i * 3 + i] |= CTM_COEFF_SIGN;
	}
}

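As a quick numeric check of the constant used above (an aside, not part of the patch): CTM_COEFF_LIMITED_RANGE is (235 - 16)/255 expressed in U32.32, i.e. roughly 0.8588, the factor by which each diagonal coefficient is scaled for limited range:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Same expression as CTM_COEFF_LIMITED_RANGE, evaluated in userspace. */
	uint64_t c = (235ULL - 16ULL) * (1ULL << 32) / 255;

	printf("%f\n", (double)c / (double)(1ULL << 32)); /* ~0.858824 */
	return 0;
}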
/* Set up the pipe CSC unit. */
static void i9xx_load_csc_matrix(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc_state *crtc_state = crtc->state;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int i, pipe = intel_crtc->pipe;
	uint16_t coeffs[9] = { 0, };

	if (crtc_state->ctm) {
		struct drm_color_ctm *ctm =
			(struct drm_color_ctm *)crtc_state->ctm->data;
		uint64_t input[9] = { 0, };

		if (intel_crtc->config->limited_color_range) {
			ctm_mult_by_limited(input, ctm->matrix);
		} else {
			for (i = 0; i < ARRAY_SIZE(input); i++)
				input[i] = ctm->matrix[i];
		}

		/*
		 * Convert fixed point S31.32 input to format supported by the
		 * hardware.
		 */
		for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
			uint64_t abs_coeff = ((1ULL << 63) - 1) & input[i];

			/*
			 * Clamp input value to min/max supported by
			 * hardware.
			 */
			abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);

			/* sign bit */
			if (CTM_COEFF_NEGATIVE(input[i]))
				coeffs[i] |= 1 << 15;

			if (abs_coeff < CTM_COEFF_0_125)
				coeffs[i] |= (3 << 12) |
					I9XX_CSC_COEFF_FP(abs_coeff, 12);
			else if (abs_coeff < CTM_COEFF_0_25)
				coeffs[i] |= (2 << 12) |
					I9XX_CSC_COEFF_FP(abs_coeff, 11);
			else if (abs_coeff < CTM_COEFF_0_5)
				coeffs[i] |= (1 << 12) |
					I9XX_CSC_COEFF_FP(abs_coeff, 10);
			else if (abs_coeff < CTM_COEFF_1_0)
				coeffs[i] |= I9XX_CSC_COEFF_FP(abs_coeff, 9);
			else if (abs_coeff < CTM_COEFF_2_0)
				coeffs[i] |= (7 << 12) |
					I9XX_CSC_COEFF_FP(abs_coeff, 8);
			else
				coeffs[i] |= (6 << 12) |
					I9XX_CSC_COEFF_FP(abs_coeff, 7);
		}
	} else {
		/*
		 * Load an identity matrix if no coefficients are provided.
		 *
		 * TODO: Check what kind of values actually come out of the
		 * pipe with these coeff/postoff values and adjust to get the
		 * best accuracy. Perhaps we even need to take the bpc value
		 * into consideration.
		 */
		for (i = 0; i < 3; i++) {
			if (intel_crtc->config->limited_color_range)
				coeffs[i * 3 + i] =
					I9XX_CSC_COEFF_LIMITED_RANGE;
			else
				coeffs[i * 3 + i] = I9XX_CSC_COEFF_1_0;
		}
	}

	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16);

	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		if (intel_crtc->config->limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config->limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}

/*
 * Set up the pipe CSC unit on CherryView.
 */
static void cherryview_load_csc_matrix(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc_state *state = crtc->state;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(crtc)->pipe;
	uint32_t mode;

	if (state->ctm) {
		struct drm_color_ctm *ctm =
			(struct drm_color_ctm *) state->ctm->data;
		uint16_t coeffs[9] = { 0, };
		int i;

		for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
			uint64_t abs_coeff =
				((1ULL << 63) - 1) & ctm->matrix[i];

			/* Round coefficient. */
			abs_coeff += 1 << (32 - 13);
			/* Clamp to hardware limits. */
			abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);

			/* Write coefficients in S3.12 format. */
			if (ctm->matrix[i] & (1ULL << 63))
				coeffs[i] = 1 << 15;
			coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
			coeffs[i] |= (abs_coeff >> 20) & 0xfff;
		}

		I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe),
			   coeffs[1] << 16 | coeffs[0]);
		I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe),
			   coeffs[3] << 16 | coeffs[2]);
		I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe),
			   coeffs[5] << 16 | coeffs[4]);
		I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe),
			   coeffs[7] << 16 | coeffs[6]);
		I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
	}

	mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
	if (!crtc_state_is_legacy(state)) {
		mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
			(state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
	}
	I915_WRITE(CGM_PIPE_MODE(pipe), mode);
}

void intel_color_set_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.load_csc_matrix)
		dev_priv->display.load_csc_matrix(crtc);
}

/* Loads the legacy palette/gamma unit for the CRTC. */
static void i9xx_load_luts_internal(struct drm_crtc *crtc,
				    struct drm_property_blob *blob)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	if (HAS_GMCH_DISPLAY(dev)) {
		if (intel_crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	if (blob) {
		struct drm_color_lut *lut = (struct drm_color_lut *) blob->data;
		for (i = 0; i < 256; i++) {
			uint32_t word =
				(drm_color_lut_extract(lut[i].red, 8) << 16) |
				(drm_color_lut_extract(lut[i].green, 8) << 8) |
				drm_color_lut_extract(lut[i].blue, 8);

			if (HAS_GMCH_DISPLAY(dev))
				I915_WRITE(PALETTE(pipe, i), word);
			else
				I915_WRITE(LGC_PALETTE(pipe, i), word);
		}
	} else {
		for (i = 0; i < 256; i++) {
			uint32_t word = (i << 16) | (i << 8) | i;

			if (HAS_GMCH_DISPLAY(dev))
				I915_WRITE(PALETTE(pipe, i), word);
			else
				I915_WRITE(LGC_PALETTE(pipe, i), word);
		}
	}
}

static void i9xx_load_luts(struct drm_crtc *crtc)
{
	i9xx_load_luts_internal(crtc, crtc->state->gamma_lut);
}

/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
static void haswell_load_luts(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *intel_crtc_state =
		to_intel_crtc_state(crtc->state);
	bool reenable_ips = false;

	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);

	i9xx_load_luts(crtc);

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}

/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
static void broadwell_load_luts(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc_state *state = crtc->state;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;

	if (crtc_state_is_legacy(state)) {
		haswell_load_luts(crtc);
		return;
	}

	I915_WRITE(PREC_PAL_INDEX(pipe),
		   PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);

	if (state->degamma_lut) {
		struct drm_color_lut *lut =
			(struct drm_color_lut *) state->degamma_lut->data;

		for (i = 0; i < lut_size; i++) {
			uint32_t word =
				drm_color_lut_extract(lut[i].red, 10) << 20 |
				drm_color_lut_extract(lut[i].green, 10) << 10 |
				drm_color_lut_extract(lut[i].blue, 10);

			I915_WRITE(PREC_PAL_DATA(pipe), word);
		}
	} else {
		for (i = 0; i < lut_size; i++) {
			uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);

			I915_WRITE(PREC_PAL_DATA(pipe),
				   (v << 20) | (v << 10) | v);
		}
	}

	if (state->gamma_lut) {
		struct drm_color_lut *lut =
			(struct drm_color_lut *) state->gamma_lut->data;

		for (i = 0; i < lut_size; i++) {
			uint32_t word =
				(drm_color_lut_extract(lut[i].red, 10) << 20) |
				(drm_color_lut_extract(lut[i].green, 10) << 10) |
				drm_color_lut_extract(lut[i].blue, 10);

			I915_WRITE(PREC_PAL_DATA(pipe), word);
		}

		/* Program the max register to clamp values > 1.0. */
		I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
			   drm_color_lut_extract(lut[i].red, 16));
		I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
			   drm_color_lut_extract(lut[i].green, 16));
		I915_WRITE(PREC_PAL_GC_MAX(pipe, 2),
			   drm_color_lut_extract(lut[i].blue, 16));
	} else {
		for (i = 0; i < lut_size; i++) {
			uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);

			I915_WRITE(PREC_PAL_DATA(pipe),
				   (v << 20) | (v << 10) | v);
		}

		I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), (1 << 16) - 1);
		I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
		I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
	}

	intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
	I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
	POSTING_READ(GAMMA_MODE(pipe));

	/*
	 * Reset the index, otherwise it prevents the legacy palette from
	 * being written properly.
	 */
	I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}

/* Loads the palette/gamma unit for the CRTC on CherryView. */
static void cherryview_load_luts(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc_state *state = crtc->state;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	struct drm_color_lut *lut;
	uint32_t i, lut_size;
	uint32_t word0, word1;

	if (crtc_state_is_legacy(state)) {
		/* Turn off degamma/gamma on CGM block. */
		I915_WRITE(CGM_PIPE_MODE(pipe),
			   (state->ctm ? CGM_PIPE_MODE_CSC : 0));
		i9xx_load_luts_internal(crtc, state->gamma_lut);
		return;
	}

	if (state->degamma_lut) {
		lut = (struct drm_color_lut *) state->degamma_lut->data;
		lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
		for (i = 0; i < lut_size; i++) {
			/* Write LUT in U0.14 format. */
			word0 =
			(drm_color_lut_extract(lut[i].green, 14) << 16) |
			drm_color_lut_extract(lut[i].blue, 14);
			word1 = drm_color_lut_extract(lut[i].red, 14);

			I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0), word0);
			I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1), word1);
		}
	}

	if (state->gamma_lut) {
		lut = (struct drm_color_lut *) state->gamma_lut->data;
		lut_size = INTEL_INFO(dev)->color.gamma_lut_size;
		for (i = 0; i < lut_size; i++) {
			/* Write LUT in U0.10 format. */
			word0 =
			(drm_color_lut_extract(lut[i].green, 10) << 16) |
			drm_color_lut_extract(lut[i].blue, 10);
			word1 = drm_color_lut_extract(lut[i].red, 10);

			I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0), word0);
			I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1);
		}
	}

	I915_WRITE(CGM_PIPE_MODE(pipe),
		   (state->ctm ? CGM_PIPE_MODE_CSC : 0) |
		   (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
		   (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));

	/*
	 * Also program a linear LUT in the legacy block (behind the
	 * CGM block).
	 */
	i9xx_load_luts_internal(crtc, NULL);
}

void intel_color_load_luts(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* The clocks have to be on to load the palette. */
	if (!crtc->state->active)
		return;

	dev_priv->display.load_luts(crtc);
}

int intel_color_check(struct drm_crtc *crtc,
		      struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	size_t gamma_length, degamma_length;

	degamma_length = INTEL_INFO(dev)->color.degamma_lut_size *
		sizeof(struct drm_color_lut);
	gamma_length = INTEL_INFO(dev)->color.gamma_lut_size *
		sizeof(struct drm_color_lut);

	/*
	 * We allow both degamma & gamma luts at the right size or
	 * NULL.
	 */
	if ((!crtc_state->degamma_lut ||
	     crtc_state->degamma_lut->length == degamma_length) &&
	    (!crtc_state->gamma_lut ||
	     crtc_state->gamma_lut->length == gamma_length))
		return 0;

	/*
	 * We also allow no degamma lut and a gamma lut at the legacy
	 * size (256 entries).
	 */
	if (!crtc_state->degamma_lut &&
	    crtc_state->gamma_lut &&
	    crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
		return 0;

	return -EINVAL;
}

void intel_color_init(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_mode_crtc_set_gamma_size(crtc, 256);

	if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
		dev_priv->display.load_luts = cherryview_load_luts;
	} else if (IS_HASWELL(dev)) {
		dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
		dev_priv->display.load_luts = haswell_load_luts;
	} else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev) ||
		   IS_BROXTON(dev) || IS_KABYLAKE(dev)) {
		dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
		dev_priv->display.load_luts = broadwell_load_luts;
	} else {
		dev_priv->display.load_luts = i9xx_load_luts;
	}

	/* Enable color management support when we have degamma & gamma LUTs. */
	if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
	    INTEL_INFO(dev)->color.gamma_lut_size != 0)
		drm_helper_crtc_enable_color_mgmt(crtc,
					INTEL_INFO(dev)->color.degamma_lut_size,
					INTEL_INFO(dev)->color.gamma_lut_size);
}
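The LUT/CTM state consumed above arrives from userspace as standard DRM blob properties. As an illustration (an editor's sketch, not part of the patch; assumes a libdrm build providing drmModeCreatePropertyBlob(), and that ctm_prop_id was looked up by name beforehand via drmModeObjectGetProperties()), programming an identity CTM could look like:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int set_identity_ctm(int fd, uint32_t crtc_id, uint32_t ctm_prop_id)
{
	struct drm_color_ctm ctm;	/* DRM uapi struct: nine S31.32 values */
	uint32_t blob_id;
	int i, ret;

	memset(&ctm, 0, sizeof(ctm));
	for (i = 0; i < 3; i++)
		ctm.matrix[i * 3 + i] = 1ULL << 32;	/* 1.0 on the diagonal */

	ret = drmModeCreatePropertyBlob(fd, &ctm, sizeof(ctm), &blob_id);
	if (ret)
		return ret;

	return drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
					ctm_prop_id, blob_id);
}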
@ -120,22 +120,16 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
static void intel_crt_get_config(struct intel_encoder *encoder,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	int dotclock;

	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);

	dotclock = pipe_config->port_clock;

	if (HAS_PCH_SPLIT(dev))
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}

static void hsw_crt_get_config(struct intel_encoder *encoder,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_ddi_get_config(encoder, pipe_config);

	pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
@ -143,6 +137,8 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
						   DRM_MODE_FLAG_PVSYNC |
						   DRM_MODE_FLAG_NVSYNC);
	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);

	pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
}

/* Note: The caller is required to filter out dpms modes not supported by the
@ -222,18 +218,26 @@ intel_crt_mode_valid(struct drm_connector *connector,
{
	struct drm_device *dev = connector->dev;
	int max_dotclk = to_i915(dev)->max_dotclk_freq;
	int max_clock;

	int max_clock = 0;
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->clock < 25000)
		return MODE_CLOCK_LOW;

	if (IS_GEN2(dev))
		max_clock = 350000;
	else
	if (HAS_PCH_LPT(dev))
		max_clock = 180000;
	else if (IS_VALLEYVIEW(dev))
		/*
		 * 270 MHz due to current DPLL limits,
		 * DAC limit supposedly 355 MHz.
		 */
		max_clock = 270000;
	else if (IS_GEN3(dev) || IS_GEN4(dev))
		max_clock = 400000;
	else
		max_clock = 350000;
	if (mode->clock > max_clock)
		return MODE_CLOCK_HIGH;

@ -261,15 +265,9 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
	pipe_config->pipe_bpp = 24;

	/* FDI must always be 2.7 GHz */
	if (HAS_DDI(dev)) {
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
	if (HAS_DDI(dev))
		pipe_config->port_clock = 135000 * 2;

		pipe_config->dpll_hw_state.wrpll = 0;
		pipe_config->dpll_hw_state.spll =
			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
	}

	return true;
}

@ -652,6 +650,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
		else if (INTEL_INFO(dev)->gen < 4)
			status = intel_crt_load_detect(crt,
				to_intel_crtc(connector->state->crtc)->pipe);
		else if (i915.load_detect_test)
			status = connector_status_disconnected;
		else
			status = connector_status_unknown;
		intel_release_load_detect_pipe(connector, &tmp, &ctx);
@ -188,28 +188,49 @@ static const struct stepping_info bxt_stepping_info[] = {
	{'B', '0'}, {'B', '1'}, {'B', '2'}
};

static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev)
static const struct stepping_info no_stepping_info = { '*', '*' };

static const struct stepping_info *
intel_get_stepping_info(struct drm_i915_private *dev_priv)
{
	const struct stepping_info *si;
	unsigned int size;

	if (IS_KABYLAKE(dev)) {
	if (IS_KABYLAKE(dev_priv)) {
		size = ARRAY_SIZE(kbl_stepping_info);
		si = kbl_stepping_info;
	} else if (IS_SKYLAKE(dev)) {
	} else if (IS_SKYLAKE(dev_priv)) {
		size = ARRAY_SIZE(skl_stepping_info);
		si = skl_stepping_info;
	} else if (IS_BROXTON(dev)) {
	} else if (IS_BROXTON(dev_priv)) {
		size = ARRAY_SIZE(bxt_stepping_info);
		si = bxt_stepping_info;
	} else {
		return NULL;
		size = 0;
	}

	if (INTEL_REVID(dev) < size)
		return si + INTEL_REVID(dev);
	if (INTEL_REVID(dev_priv) < size)
		return si + INTEL_REVID(dev_priv);

	return NULL;
	return &no_stepping_info;
}

static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
	uint32_t val, mask;

	mask = DC_STATE_DEBUG_MASK_MEMORY_UP;

	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_DEBUG_MASK_CORES;

	/* The below bit doesn't need to be cleared ever afterwards */
	val = I915_READ(DC_STATE_DEBUG);
	if ((val & mask) != mask) {
		val |= mask;
		I915_WRITE(DC_STATE_DEBUG, val);
		POSTING_READ(DC_STATE_DEBUG);
	}
}

/**
@ -220,19 +241,19 @@ static const struct stepping_info *intel_get_stepping_info(struct drm_device *de
 * Every time display comes back from low power state this function is called to
 * copy the firmware from internal memory to registers.
 */
bool intel_csr_load_program(struct drm_i915_private *dev_priv)
void intel_csr_load_program(struct drm_i915_private *dev_priv)
{
	u32 *payload = dev_priv->csr.dmc_payload;
	uint32_t i, fw_size;

	if (!IS_GEN9(dev_priv)) {
		DRM_ERROR("No CSR support available for this platform\n");
		return false;
		return;
	}

	if (!dev_priv->csr.dmc_payload) {
		DRM_ERROR("Tried to program CSR with empty payload\n");
		return false;
		return;
	}

	fw_size = dev_priv->csr.dmc_fw_size;
@ -246,19 +267,17 @@ bool intel_csr_load_program(struct drm_i915_private *dev_priv)

	dev_priv->csr.dc_state = 0;

	return true;
	gen9_set_dc_state_debugmask(dev_priv);
}

static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
			      const struct firmware *fw)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_css_header *css_header;
	struct intel_package_header *package_header;
	struct intel_dmc_header *dmc_header;
	struct intel_csr *csr = &dev_priv->csr;
	const struct stepping_info *stepping_info = intel_get_stepping_info(dev);
	char stepping, substepping;
	const struct stepping_info *si = intel_get_stepping_info(dev_priv);
	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
	uint32_t i;
	uint32_t *dmc_payload;
@ -266,14 +285,6 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
	if (!fw)
		return NULL;

	if (!stepping_info) {
		DRM_ERROR("Unknown stepping info, firmware loading failed\n");
		return NULL;
	}

	stepping = stepping_info->stepping;
	substepping = stepping_info->substepping;

	/* Extract CSS Header information */
	css_header = (struct intel_css_header *)fw->data;
	if (sizeof(struct intel_css_header) !=
@ -285,7 +296,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,

	csr->version = css_header->version;

	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	    csr->version < SKL_CSR_VERSION_REQUIRED) {
		DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
			 " please upgrade to v%u.%u or later"
@ -313,11 +324,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
	/* Search for dmc_offset to find firmware binary. */
	for (i = 0; i < package_header->num_entries; i++) {
		if (package_header->fw_info[i].substepping == '*' &&
		    stepping == package_header->fw_info[i].stepping) {
		    si->stepping == package_header->fw_info[i].stepping) {
			dmc_offset = package_header->fw_info[i].offset;
			break;
		} else if (stepping == package_header->fw_info[i].stepping &&
			substepping == package_header->fw_info[i].substepping) {
		} else if (si->stepping == package_header->fw_info[i].stepping &&
			si->substepping == package_header->fw_info[i].substepping) {
			dmc_offset = package_header->fw_info[i].offset;
			break;
		} else if (package_header->fw_info[i].stepping == '*' &&
@ -325,7 +336,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
			dmc_offset = package_header->fw_info[i].offset;
	}
	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
		DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
		DRM_ERROR("Firmware not supported for %c stepping\n",
			  si->stepping);
		return NULL;
	}
	readcount += dmc_offset;
@ -371,9 +383,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
		return NULL;
	}

	memcpy(dmc_payload, &fw->data[readcount], nbytes);

	return dmc_payload;
	return memcpy(dmc_payload, &fw->data[readcount], nbytes);
}

static void csr_load_work_fn(struct work_struct *work)
@ -388,18 +398,12 @@ static void csr_load_work_fn(struct work_struct *work)

	ret = request_firmware(&fw, dev_priv->csr.fw_path,
			       &dev_priv->dev->pdev->dev);
	if (!fw)
		goto out;
	if (fw)
		dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);

	dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
	if (!dev_priv->csr.dmc_payload)
		goto out;

	/* load csr program during system boot, as needed for DC states */
	intel_csr_load_program(dev_priv);

out:
	if (dev_priv->csr.dmc_payload) {
		intel_csr_load_program(dev_priv);

		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		DRM_INFO("Finished loading %s (v%u.%u)\n",
File diff suppressed because it is too large
@ -671,60 +671,55 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2 and use that
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
	 */
	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (intel_dig_port->port == PORT_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (HAS_PCH_LPT_H(dev_priv)) {
	if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
	return ilk_get_aux_clock_divider(intel_dp, index);
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
@ -737,10 +732,10 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
@ -1229,71 +1224,6 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	results in CDCLK change. Need to handle the change of CDCLK by
	disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
@ -1570,10 +1500,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,

	/* Get bpp from vbt only for panels that don't have bpp in edid */
	if (intel_connector->base.display_info.bpc == 0 &&
	    (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
	    (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
			      dev_priv->vbt.edp_bpp);
		bpp = dev_priv->vbt.edp_bpp;
			      dev_priv->vbt.edp.bpp);
		bpp = dev_priv->vbt.edp.bpp;
	}

	/*
@ -1651,13 +1581,7 @@ found:
				&pipe_config->dp_m2_n2);
	}

	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config);
	else
	if (!HAS_DDI(dev))
		intel_dp_set_clock(encoder, pipe_config);

	return true;
@ -1779,11 +1703,11 @@ static void wait_panel_status(struct intel_dp *intel_dp,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
		      5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
@ -2409,7 +2333,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

@ -2459,16 +2382,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
		pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
	if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
@ -2483,8 +2402,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}

@ -3238,7 +3157,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		if (dev_priv->edp_low_vswing && port == PORT_A)
		if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
@ -3963,6 +3882,9 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
{
	u8 buf[1];

	if (!i915.enable_dp_mst)
		return false;

	if (!intel_dp->can_mst)
		return false;

@ -5071,14 +4993,6 @@ put_power:
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	union child_device_config *p_child;
	int i;
	static const short port_mapping[] = {
		[PORT_B] = DVO_PORT_DPB,
		[PORT_C] = DVO_PORT_DPC,
		[PORT_D] = DVO_PORT_DPD,
		[PORT_E] = DVO_PORT_DPE,
	};

	/*
	 * eDP not supported on g4x. so bail out early just
@ -5090,18 +5004,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
	if (port == PORT_A)
		return true;

	if (!dev_priv->vbt.child_dev_num)
		return false;

	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		p_child = dev_priv->vbt.child_dev + i;

		if (p_child->common.dvo_port == port_mapping[port] &&
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
			return true;
	}
	return false;
	return intel_bios_is_port_edp(dev_priv, port);
}

void
@ -5208,7 +5111,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;
	vbt = dev_priv->vbt.edp.pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
@ -5259,7 +5162,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int div = dev_priv->rawclk_freq / 1000;
	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;
@ -5852,19 +5755,17 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	if (HAS_DDI(dev))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
@ -33,7 +33,6 @@
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
					struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
@ -92,9 +91,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,

	pipe_config->dp_m_n.tu = slots;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config);

	return true;

}
@ -506,6 +502,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_device *dev = connector->dev;

	intel_connector->unregister(intel_connector);

	/* need to nuke the connector */
	drm_modeset_lock_all(dev);
	if (connector->state->crtc) {
@ -519,11 +517,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,

		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
	}
	drm_modeset_unlock_all(dev);

	intel_connector->unregister(intel_connector);

	drm_modeset_lock_all(dev);
	intel_connector_remove_from_fbdev(intel_connector);
	drm_connector_cleanup(connector);
	drm_modeset_unlock_all(dev);
1773	drivers/gpu/drm/i915/intel_dpll_mgr.c	Normal file
File diff suppressed because it is too large
164	drivers/gpu/drm/i915/intel_dpll_mgr.h	Normal file
@ -0,0 +1,164 @@
/*
 * Copyright © 2012-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef _INTEL_DPLL_MGR_H_
#define _INTEL_DPLL_MGR_H_

/* FIXME: Move this to a more appropriate place. */
#define abs_diff(a, b) ({			\
	typeof(a) __a = (a);			\
	typeof(b) __b = (b);			\
	(void) (&__a == &__b);			\
	__a > __b ? (__a - __b) : (__b - __a); })

struct drm_i915_private;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;

struct intel_shared_dpll;
struct intel_dpll_mgr;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A = 0,
	DPLL_ID_PCH_PLL_B = 1,
	/* hsw/bdw */
	DPLL_ID_WRPLL1 = 0,
	DPLL_ID_WRPLL2 = 1,
	DPLL_ID_SPLL = 2,
	DPLL_ID_LCPLL_810 = 3,
	DPLL_ID_LCPLL_1350 = 4,
	DPLL_ID_LCPLL_2700 = 5,

	/* skl */
	DPLL_ID_SKL_DPLL0 = 0,
	DPLL_ID_SKL_DPLL1 = 1,
	DPLL_ID_SKL_DPLL2 = 2,
	DPLL_ID_SKL_DPLL3 = 3,
};
#define I915_NUM_PLLS 6

/** Inform the state checker that the DPLL is kept enabled even if not
 * in use by any crtc.
 */
#define INTEL_DPLL_ALWAYS_ON	(1 << 0)

struct intel_dpll_hw_state {
	/* i9xx, pch plls */
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;

	/* hsw, bdw */
	uint32_t wrpll;
	uint32_t spll;

	/* skl */
	/*
	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
	 * lower part of ctrl1 and they get shifted into position when
	 * writing the register. This allows us to easily compare the state
	 * to share the DPLL.
	 */
	uint32_t ctrl1;
	/* HDMI only, 0 when used for DP */
	uint32_t cfgcr1, cfgcr2;

	/* bxt */
	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
		 pcsdw12;
};

struct intel_shared_dpll_config {
	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
	struct intel_dpll_hw_state hw_state;
};

struct intel_shared_dpll_funcs {
	/* The mode_set hook is optional and should be used together with the
	 * intel_prepare_shared_dpll function. */
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

struct intel_shared_dpll {
	struct intel_shared_dpll_config config;

	unsigned active_mask; /* mask of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;

	struct intel_shared_dpll_funcs funcs;

	uint32_t flags;
};

#define SKL_DPLL0 0
#define SKL_DPLL1 1
#define SKL_DPLL2 2
#define SKL_DPLL3 3

/* shared dpll functions */
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
			    enum intel_dpll_id id);
enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
void
intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
			     struct intel_shared_dpll *pll,
			     struct intel_crtc *crtc);
void
intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
			     struct intel_shared_dpll *pll,
			     struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
						struct intel_crtc_state *state,
						struct intel_encoder *encoder);
void intel_prepare_shared_dpll(struct intel_crtc *crtc);
void intel_enable_shared_dpll(struct intel_crtc *crtc);
void intel_disable_shared_dpll(struct intel_crtc *crtc);
void intel_shared_dpll_commit(struct drm_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);


#endif /* _INTEL_DPLL_MGR_H_ */
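For orientation, the vfunc table declared above (struct intel_shared_dpll_funcs) is what the per-platform code in the (suppressed) intel_dpll_mgr.c diff fills in; a hypothetical backend, with invented names purely for illustration, would look roughly like:

/* Editor's sketch, not from the patch; "foo" names are made up. */
static void foo_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	/* write pll->config.hw_state into the hardware, then wait for lock */
}

static void foo_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	/* clear the PLL enable bit */
}

static bool foo_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	/* read the registers back into *hw_state; return whether it is on */
	return false;
}

static const struct intel_shared_dpll_funcs foo_pll_funcs = {
	.enable = foo_pll_enable,
	.disable = foo_pll_disable,
	.get_hw_state = foo_pll_get_hw_state,
};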
@ -44,9 +44,13 @@
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 *
 * TODO: When modesetting has fully transitioned to atomic, the below
 * drm_can_sleep() can be removed and in_atomic()/!in_atomic() asserts
 * added.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
#define _wait_for(COND, US, W) ({ \
	unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
@ -55,7 +59,7 @@
			break;						\
		}							\
		if ((W) && drm_can_sleep()) {				\
			usleep_range((W)*1000, (W)*2000);		\
			usleep_range((W), (W)*2);			\
		} else {						\
			cpu_relax();					\
		}							\
@ -63,10 +67,40 @@
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
#define wait_for_atomic_us(COND, US) _wait_for((COND), \
					       DIV_ROUND_UP((US), 1000), 0)
#define wait_for(COND, MS)	_wait_for((COND), (MS) * 1000, 1000)
#define wait_for_us(COND, US)	_wait_for((COND), (US), 1)

/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK do { } while (0)
#endif

#define _wait_for_atomic(COND, US) ({ \
	unsigned long end__; \
	int ret__ = 0; \
	_WAIT_FOR_ATOMIC_CHECK; \
	BUILD_BUG_ON((US) > 50000); \
	end__ = (local_clock() >> 10) + (US) + 1; \
	while (!(COND)) { \
		if (time_after((unsigned long)(local_clock() >> 10), end__)) { \
			/* Unlike the regular wait_for(), this atomic variant \
			 * cannot be preempted (and we'll just ignore the issue\
			 * of irq interruptions) and so we know that no time \
			 * has passed since the last check of COND and can \
			 * immediately report the timeout. \
			 */ \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		cpu_relax(); \
	} \
	ret__; \
})

#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000)
#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US))

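With the rework above, wait_for()/wait_for_us() take milliseconds/microseconds respectively and may sleep, while the atomic variants now busy-wait against local_clock(). A hypothetical caller (FOO_REG and FOO_DONE are invented names, purely illustrative) would read:

/* Editor's illustration, not from the patch. */
if (wait_for(I915_READ(FOO_REG) & FOO_DONE, 10))		/* 10 ms, may sleep */
	DRM_ERROR("FOO did not complete\n");

if (wait_for_atomic_us(I915_READ(FOO_REG) & FOO_DONE, 500))	/* 500 us, spins */
	DRM_ERROR("FOO did not complete\n");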
#define KHz(x) (1000 * (x))
|
||||
#define MHz(x) KHz(1000 * (x))
|
||||
@ -118,6 +152,7 @@ enum intel_output_type {
|
||||
struct intel_framebuffer {
|
||||
struct drm_framebuffer base;
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct intel_rotation_info rot_info;
|
||||
};
|
||||
|
||||
struct intel_fbdev {
|
||||
@ -260,6 +295,12 @@ struct intel_atomic_state {
|
||||
|
||||
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
|
||||
struct intel_wm_config wm_config;
|
||||
|
||||
/*
|
||||
* Current watermarks can't be trusted during hardware readout, so
|
||||
* don't bother calculating intermediate watermarks.
|
||||
*/
|
||||
bool skip_intermediate_wm;
|
||||
};
|
||||
|
||||
struct intel_plane_state {
|
||||
@ -349,6 +390,7 @@ struct intel_crtc_scaler_state {
|
||||
|
||||
struct intel_pipe_wm {
|
||||
struct intel_wm_level wm[5];
|
||||
struct intel_wm_level raw_wm[5];
|
||||
uint32_t linetime;
|
||||
bool fbc_wm_enabled;
|
||||
bool pipe_enabled;
|
||||
@ -376,9 +418,10 @@ struct intel_crtc_state {
|
||||
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
|
||||
unsigned long quirks;
|
||||
|
||||
unsigned fb_bits; /* framebuffers to flip */
|
||||
bool update_pipe; /* can a fast modeset be performed? */
|
||||
bool disable_cxsr;
|
||||
bool wm_changed; /* watermarks are updated */
|
||||
bool update_wm_pre, update_wm_post; /* watermarks are updated */
|
||||
bool fb_changed; /* fb on any of the planes is changed */
|
||||
|
||||
/* Pipe source size (ie. panel fitter input size)
|
||||
@ -394,7 +437,8 @@ struct intel_crtc_state {
|
||||
bool has_infoframe;
|
||||
|
||||
/* CPU Transcoder for the pipe. Currently this can only differ from the
|
||||
* pipe on Haswell (where we have a special eDP transcoder). */
|
||||
* pipe on Haswell and later (where we have a special eDP transcoder)
|
||||
* and Broxton (where we have special DSI transcoders). */
|
||||
enum transcoder cpu_transcoder;
|
||||
|
||||
/*
|
||||
@ -441,8 +485,8 @@ struct intel_crtc_state {
|
||||
* haswell. */
|
||||
struct dpll dpll;
|
||||
|
||||
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
|
||||
enum intel_dpll_id shared_dpll;
|
||||
/* Selected dpll when shared or NULL. */
|
||||
struct intel_shared_dpll *shared_dpll;
|
||||
|
||||
/*
|
||||
* - PORT_CLK_SEL for DDI ports on HSW/BDW.
|
||||
@ -510,14 +554,33 @@ struct intel_crtc_state {
|
||||
|
||||
struct {
|
||||
/*
|
||||
* optimal watermarks, programmed post-vblank when this state
|
||||
* is committed
|
||||
* Optimal watermarks, programmed post-vblank when this state
|
||||
* is committed.
|
||||
*/
|
||||
union {
|
||||
struct intel_pipe_wm ilk;
|
||||
struct skl_pipe_wm skl;
|
||||
} optimal;
|
||||
|
||||
/*
|
||||
* Intermediate watermarks; these can be programmed immediately
|
||||
* since they satisfy both the current configuration we're
|
||||
* switching away from and the new configuration we're switching
|
||||
* to.
|
||||
*/
|
||||
struct intel_pipe_wm intermediate;
|
||||
|
||||
/*
|
||||
* Platforms with two-step watermark programming will need to
|
||||
* update watermark programming post-vblank to switch from the
|
||||
* safe intermediate watermarks to the optimal final
|
||||
* watermarks.
|
||||
*/
|
||||
bool need_postvbl_update;
|
||||
} wm;
|
||||
|
||||
/* Gamma mode programmed on the pipe */
|
||||
uint32_t gamma_mode;
|
||||
};
|
||||
|
||||
struct vlv_wm_state {
|
||||
@ -537,23 +600,6 @@ struct intel_mmio_flip {
|
||||
unsigned int rotation;
|
||||
};
|
||||
|
||||
/*
|
||||
* Tracking of operations that need to be performed at the beginning/end of an
|
||||
* atomic commit, outside the atomic section where interrupts are disabled.
|
||||
* These are generally operations that grab mutexes or might otherwise sleep
|
||||
* and thus can't be run with interrupts disabled.
|
||||
*/
|
||||
struct intel_crtc_atomic_commit {
|
||||
/* Sleepable operations to perform before commit */
|
||||
|
||||
/* Sleepable operations to perform after commit */
|
||||
unsigned fb_bits;
|
||||
bool post_enable_primary;
|
||||
|
||||
/* Sleepable operations to perform before and after commit */
|
||||
bool update_fbc;
|
||||
};
|
||||
|
||||
struct intel_crtc {
|
||||
struct drm_crtc base;
|
||||
enum pipe pipe;
|
||||
@ -600,6 +646,7 @@ struct intel_crtc {
|
||||
struct intel_pipe_wm ilk;
|
||||
struct skl_pipe_wm skl;
|
||||
} active;
|
||||
|
||||
/* allow CxSR on this pipe */
|
||||
bool cxsr_allowed;
|
||||
} wm;
|
||||
@ -613,8 +660,6 @@ struct intel_crtc {
|
||||
int scanline_start;
|
||||
} debug;
|
||||
|
||||
struct intel_crtc_atomic_commit atomic;
|
||||
|
||||
/* scalers available on this crtc */
|
||||
int num_scalers;
|
||||
|
||||
@ -1007,7 +1052,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_pll_init(struct drm_device *dev);
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
@ -1051,7 +1095,7 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format);

/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
void intel_audio_codec_enable(struct intel_encoder *encoder);
void intel_audio_codec_disable(struct intel_encoder *encoder);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
@ -1059,9 +1103,9 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);

/* intel_display.c */
extern const struct drm_plane_funcs intel_plane_funcs;
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
int intel_hrawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
@ -1106,9 +1150,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state);
int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int rotation);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@ -1144,19 +1187,13 @@ intel_rotation_90_or_270(unsigned int rotation)
void intel_create_rotation_property(struct drm_device *dev,
struct intel_plane *plane);

/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);

int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);

/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@ -1165,6 +1202,9 @@ void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
@ -1172,11 +1212,10 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
uint64_t fb_modifier,
unsigned int cpp,
unsigned int pitch);
u32 intel_compute_tile_offset(int *x, int *y,
const struct drm_framebuffer *fb, int plane,
unsigned int pitch,
unsigned int rotation);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@ -1196,9 +1235,6 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
intel_clock_t *best_clock);
int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
@ -1226,7 +1262,7 @@ u32 skl_plane_ctl_rotation(unsigned int rotation);

/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
bool intel_csr_load_program(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);

/* intel_dp.c */
@ -1266,7 +1302,6 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);

void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@ -1541,6 +1576,7 @@ void intel_suspend_hw(struct drm_device *dev);
int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_init_pm(struct drm_device *dev);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
@ -1565,6 +1601,7 @@ void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);

/* intel_sdvo.c */
@ -1606,6 +1643,18 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,

return to_intel_crtc_state(crtc_state);
}

static inline struct intel_plane_state *
intel_atomic_get_existing_plane_state(struct drm_atomic_state *state,
struct intel_plane *plane)
{
struct drm_plane_state *plane_state;

plane_state = drm_atomic_get_existing_plane_state(state, &plane->base);

return to_intel_plane_state(plane_state);
}

int intel_atomic_setup_scalers(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
@ -1617,4 +1666,10 @@ void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;

/* intel_color.c */
void intel_color_init(struct drm_crtc *crtc);
int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
void intel_color_set_csc(struct drm_crtc *crtc);
void intel_color_load_luts(struct drm_crtc *crtc);

#endif /* __INTEL_DRV_H__ */

@ -268,6 +268,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
@ -284,6 +285,14 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;

if (IS_BROXTON(dev_priv)) {
/* Dual link goes to DSI transcoder A. */
if (intel_dsi->ports == BIT(PORT_C))
pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
else
pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
}

return true;
}

@ -403,7 +412,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;

if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
temp |= intel_crtc->pipe ?
@ -667,7 +676,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
bool ret;
bool active = false;

DRM_DEBUG_KMS("\n");

@ -675,38 +684,60 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;

ret = false;
/*
* On Broxton the PLL needs to be enabled with a valid divider
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
if (IS_BROXTON(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
goto out_put_power;

/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 dpi_enabled, func;

func = I915_READ(MIPI_DSI_FUNC_PRG(port));
dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;

/* Due to some hardware limitations on BYT, MIPI Port C DPI
* Enable bit does not get set. To check whether DSI Port C
* was enabled in BIOS, check the Pipe B enable bit
*/
if (IS_VALLEYVIEW(dev) && port == PORT_C)
dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
PIPECONF_ENABLE;
enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;

if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
*pipe = port == PORT_A ? PIPE_A : PIPE_B;
ret = true;

goto out;
}
/* Try command mode if video mode not enabled */
if (!enabled) {
u32 tmp = I915_READ(MIPI_DSI_FUNC_PRG(port));
enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
}

if (!enabled)
continue;

if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
continue;

if (IS_BROXTON(dev_priv)) {
u32 tmp = I915_READ(MIPI_CTRL(port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;

if (WARN_ON(tmp > PIPE_C))
continue;

*pipe = tmp;
} else {
*pipe = port == PORT_A ? PIPE_A : PIPE_B;
}

active = true;
break;
}
out:

out_put_power:
intel_display_power_put(dev_priv, power_domain);

return ret;
return active;
}

static void intel_dsi_get_config(struct intel_encoder *encoder,
@ -787,7 +818,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;

u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@ -849,6 +880,23 @@ static void set_dsi_timings(struct drm_encoder *encoder,
}
}

static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
{
switch (fmt) {
case MIPI_DSI_FMT_RGB888:
return VID_MODE_FORMAT_RGB888;
case MIPI_DSI_FMT_RGB666:
return VID_MODE_FORMAT_RGB666;
case MIPI_DSI_FMT_RGB666_PACKED:
return VID_MODE_FORMAT_RGB666_PACKED;
case MIPI_DSI_FMT_RGB565:
return VID_MODE_FORMAT_RGB565;
default:
MISSING_CASE(fmt);
return VID_MODE_FORMAT_RGB666;
}
}

static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
@ -858,7 +906,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum port port;
unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 val, tmp;
u16 mode_hdisplay;

@ -917,9 +965,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
} else {
val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;

/* XXX: cross-check bpp vs. pixel format? */
val |= intel_dsi->pixel_format;
val |= pixel_format_to_reg(intel_dsi->pixel_format);
}

tmp = 0;
@ -1121,11 +1167,13 @@ void intel_dsi_init(struct drm_device *dev)
DRM_DEBUG_KMS("\n");

/* There is no detection method for MIPI so rely on VBT */
if (!dev_priv->vbt.has_mipi)
if (!intel_bios_is_dsi_present(dev_priv, &port))
return;

if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
} else if (IS_BROXTON(dev)) {
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
return;
@ -1161,17 +1209,21 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;

/* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
intel_encoder->crtc_mask = (1 << PIPE_A);
intel_dsi->ports = (1 << PORT_A);
} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
intel_encoder->crtc_mask = (1 << PIPE_B);
intel_dsi->ports = (1 << PORT_C);
}
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
if (IS_BROXTON(dev_priv))
intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
else if (port == PORT_A)
intel_encoder->crtc_mask = BIT(PIPE_A);
else
intel_encoder->crtc_mask = BIT(PIPE_B);

if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
else
intel_dsi->ports = BIT(port);

/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
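One detail worth calling out in the hw state readout above: on BXT any pipe can drive a DSI transcoder, so the active pipe is recovered from the MIPI_CTRL pipe-select field rather than inferred from the port. A self-contained sketch of that mask-and-shift decode; the register value and field layout here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: bits [12:10] hold the pipe-select field. */
#define PIPE_SELECT_SHIFT 10
#define PIPE_SELECT_MASK (0x7 << PIPE_SELECT_SHIFT)

static unsigned int decode_pipe(uint32_t mipi_ctrl)
{
    return (mipi_ctrl & PIPE_SELECT_MASK) >> PIPE_SELECT_SHIFT;
}

int main(void)
{
    uint32_t reg = 2 << PIPE_SELECT_SHIFT; /* pretend register readout */
    printf("pipe %c\n", 'A' + decode_pipe(reg)); /* prints "pipe C" */
    return 0;
}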

@ -34,8 +34,6 @@
#define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2

int dsi_pixel_format_bpp(int pixel_format);

struct intel_dsi_host;

struct intel_dsi {
@ -64,8 +62,12 @@ struct intel_dsi {
/* number of DSI lanes */
unsigned int lane_count;

/* video mode pixel format for MIPI_DSI_FUNC_PRG register */
u32 pixel_format;
/*
* video mode pixel format
*
* XXX: consolidate on .format in struct mipi_dsi_device.
*/
enum mipi_dsi_pixel_format pixel_format;

/* video mode format for MIPI_VIDEO_MODE_FORMAT register */
u32 video_mode_format;
@ -117,15 +119,14 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
return container_of(h, struct intel_dsi_host, base);
}

#define for_each_dsi_port(__port, __ports_mask) \
for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
for_each_if ((__ports_mask) & (1 << (__port)))
#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask)

static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dsi, base.base);
}

bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
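The for_each_port_masked rewrite above replaces an open-coded two-level macro with a shared helper. A stand-alone approximation of such a mask-driven iterator (the enum values are illustrative):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, I915_MAX_PORTS };

/* Visit every port whose bit is set in the mask, in the spirit of the
 * old open-coded for_each_dsi_port() above. */
#define for_each_port_masked(__port, __mask) \
    for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
        if ((__mask) & (1 << (__port)))

int main(void)
{
    enum port p;
    unsigned int mask = (1 << PORT_A) | (1 << PORT_C);

    for_each_port_masked(p, mask)
        printf("port %c\n", 'A' + p); /* prints A and C */
    return 0;
}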

@ -412,6 +412,25 @@ static const struct drm_panel_funcs vbt_panel_funcs = {
.get_modes = vbt_panel_get_modes,
};

/* XXX: This should be done when parsing the VBT in intel_bios.c */
static enum mipi_dsi_pixel_format pixel_format_from_vbt(u32 fmt)
{
/* It just so happens the VBT matches register contents. */
switch (fmt) {
case VID_MODE_FORMAT_RGB888:
return MIPI_DSI_FMT_RGB888;
case VID_MODE_FORMAT_RGB666:
return MIPI_DSI_FMT_RGB666;
case VID_MODE_FORMAT_RGB666_PACKED:
return MIPI_DSI_FMT_RGB666_PACKED;
case VID_MODE_FORMAT_RGB565:
return MIPI_DSI_FMT_RGB565;
default:
MISSING_CASE(fmt);
return MIPI_DSI_FMT_RGB666;
}
}

struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@ -420,7 +439,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
struct vbt_panel *vbt_panel;
u32 bits_per_pixel = 24;
u32 bpp;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 ui_num, ui_den;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
@ -436,12 +455,11 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
intel_dsi->pixel_format = pixel_format_from_vbt(mipi_config->videomode_color_format << 7);
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);

intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;

bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format);

intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
@ -475,8 +493,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
*/
if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
if (mipi_config->target_burst_mode_freq) {
computed_ddr =
(pclk * bits_per_pixel) / intel_dsi->lane_count;
computed_ddr = (pclk * bpp) / intel_dsi->lane_count;

if (mipi_config->target_burst_mode_freq <
computed_ddr) {
@ -499,7 +516,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->burst_mode_ratio = burst_mode_ratio;
intel_dsi->pclk = pclk;

bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;
bitrate = (pclk * bpp) / intel_dsi->lane_count;

switch (intel_dsi->escape_clk_div) {
case 0:

@ -30,27 +30,6 @@
#include "i915_drv.h"
#include "intel_dsi.h"

int dsi_pixel_format_bpp(int pixel_format)
{
int bpp;

switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}

return bpp;
}

struct dsi_mnp {
u32 dsi_pll_ctrl;
u32 dsi_pll_div;
@ -64,10 +43,11 @@ static const u32 lfsr_converts[] = {
};

/* Get DSI clock from pixel clock */
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
int lane_count)
{
u32 dsi_clk_khz;
u32 bpp = dsi_pixel_format_bpp(pixel_format);
u32 bpp = mipi_dsi_pixel_format_to_bpp(fmt);

/* DSI data rate = pixel clock * bits per pixel / lane count
pixel clock is converted from KHz to Hz */
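The comment spells out the conversion: DSI (DDR) clock = pixel clock * bits per pixel / lane count. A quick worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
    unsigned int pclk_khz = 148500; /* ~1080p pixel clock, in kHz */
    unsigned int bpp = 24;          /* MIPI_DSI_FMT_RGB888 */
    unsigned int lanes = 4;

    /* dsi_clk_khz = pclk * bpp / lane_count, per the comment above */
    unsigned int dsi_clk_khz = pclk_khz * bpp / lanes;
    printf("%u kHz\n", dsi_clk_khz); /* 891000 kHz, i.e. 891 MHz */
    return 0;
}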
@ -212,6 +192,36 @@ static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}

static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
{
bool enabled;
u32 val;
u32 mask;

mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
val = I915_READ(BXT_DSI_PLL_ENABLE);
enabled = (val & mask) == mask;

if (!enabled)
return false;

/*
* Both dividers must be programmed with valid values even if only one
* of the PLL is used, see BSpec/Broxton Clocks. Check this here for
* paranoia, since BIOS is known to misconfigure PLLs in this way at
* times, and since accessing DSI registers with invalid dividers
* causes a system hang.
*/
val = I915_READ(BXT_DSI_PLL_CTL);
if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
DRM_DEBUG_DRIVER("PLL is enabled with invalid divider settings (%08x)\n",
val);
enabled = false;
}

return enabled;
}

static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
@ -232,9 +242,9 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}

static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
{
int bpp = dsi_pixel_format_bpp(pixel_format);
int bpp = mipi_dsi_pixel_format_to_bpp(fmt);

WARN(bpp != pipe_bpp,
"bpp match assertion failure (expected %d, current %d)\n",
@ -362,35 +372,57 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
/* Program BXT Mipi clocks and dividers */
static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
{
u32 tmp;
u32 divider;
u32 dsi_rate;
u32 pll_ratio;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
u32 dsi_rate = 0;
u32 pll_ratio = 0;
u32 rx_div;
u32 tx_div;
u32 rx_div_upper;
u32 rx_div_lower;
u32 mipi_8by3_divider;

/* Clear old configurations */
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));

/* Get the current DSI rate (actual) */
pll_ratio = I915_READ(BXT_DSI_PLL_CTL) &
BXT_DSI_PLL_RATIO_MASK;
dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;

/* Max possible output of clock is 39.5 MHz, program value -1 */
divider = (dsi_rate / BXT_MAX_VAR_OUTPUT_KHZ) - 1;
tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, divider);
/*
* tx clock should be <= 20MHz and the div value must be
* subtracted by 1 as per bspec
*/
tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1;
/*
* rx clock should be <= 150MHz and the div value must be
* subtracted by 1 as per bspec
*/
rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1;

/*
* Tx escape clock must be as close to 20MHz as possible, but should
* not exceed it. Hence select divide by 2
* rx divider value needs to be updated in the
* two different bit fields in the register hence splitting the
* rx divider value accordingly
*/
tmp |= BXT_MIPI_TX_ESCLK_8XDIV_BY2(port);
rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2;
rx_div_upper = (rx_div & RX_DIVIDER_BIT_3_4) >> 2;

tmp |= BXT_MIPI_RX_ESCLK_8X_BY3(port);
/* As per bspec program the 8/3X clock divider to the below value */
if (dev_priv->vbt.dsi.config->is_cmd_mode)
mipi_8by3_divider = 0x2;
else
mipi_8by3_divider = 0x3;

tmp |= BXT_MIPI_8X_BY3_DIVIDER(port, mipi_8by3_divider);
tmp |= BXT_MIPI_TX_ESCLK_DIVIDER(port, tx_div);
tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);

I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}
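The divider math above caps the Tx escape clock at 20 MHz and the Rx clock at 150 MHz, programming each value minus one. A worked example; the PLL ratio is made up, and 19200 kHz is used on the assumption that BXT's reference clock (BXT_REF_CLOCK_KHZ) is 19.2 MHz:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int pll_ratio = 34;  /* illustrative BXT_DSI_PLL_CTL readout */
    unsigned int ref_khz = 19200; /* assumed reference clock */
    unsigned int dsi_rate = ref_khz * pll_ratio / 2; /* 326400 kHz */

    /* tx <= 20 MHz, rx <= 150 MHz; hardware wants the value minus 1 */
    unsigned int tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1;  /* 16 */
    unsigned int rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1; /* 2 */

    printf("tx_div=%u rx_div=%u\n", tx_div, rx_div);
    return 0;
}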
@ -484,6 +516,16 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
DRM_DEBUG_KMS("DSI PLL locked\n");
}

bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
{
if (IS_BROXTON(dev_priv))
return bxt_dsi_pll_is_enabled(dev_priv);

MISSING_CASE(INTEL_DEVID(dev_priv));

return false;
}

void intel_enable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
@ -513,9 +555,9 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
/* Clear old configurations */
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
}

@ -516,9 +516,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
end = dev_priv->ggtt.stolen_size - 8 * 1024 * 1024;
else
end = dev_priv->gtt.stolen_usable_size;
end = dev_priv->ggtt.stolen_usable_size;

/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.

@ -146,7 +146,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
if (size * 2 < dev_priv->gtt.stolen_usable_size)
if (size * 2 < dev_priv->ggtt.stolen_usable_size)
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
@ -220,7 +220,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
ret = intel_pin_and_fence_fb_obj(NULL, &ifbdev->fb->base, NULL);
ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
if (ret)
goto out_unlock;

@ -244,13 +244,13 @@ static int intelfb_create(struct drm_fb_helper *helper,

/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
info->apertures->ranges[0].size = dev_priv->ggtt.mappable_end;

info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;

info->screen_base =
ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ioremap_wc(dev_priv->ggtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@ -379,6 +379,7 @@ retry:
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_fb_helper_crtc *new_crtc;
struct intel_crtc *intel_crtc;

fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
@ -420,6 +421,13 @@ retry:

num_connectors_enabled++;

intel_crtc = to_intel_crtc(connector->state->crtc);
for (j = 0; j < 256; j++) {
intel_crtc->lut_r[j] = j;
intel_crtc->lut_g[j] = j;
intel_crtc->lut_b[j] = j;
}

new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);

/*
@ -800,6 +808,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

async_synchronize_full();
if (dev_priv->fbdev)
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
@ -811,6 +821,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
struct intel_fbdev *ifbdev = dev_priv->fbdev;
struct drm_fb_helper *fb_helper;

async_synchronize_full();
if (!ifbdev)
return;

@ -212,7 +212,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
POSTING_READ(SERR_INT);

DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
DRM_ERROR("pch fifo underrun on pch transcoder %s\n",
transcoder_name(pch_transcoder));
}

@ -235,7 +235,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,

if (old && I915_READ(SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
DRM_ERROR("uncleared pch fifo underrun on pch transcoder %s\n",
transcoder_name(pch_transcoder));
}
}
@ -386,7 +386,7 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
{
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false))
DRM_ERROR("PCH transcoder %c FIFO underrun\n",
DRM_ERROR("PCH transcoder %s FIFO underrun\n",
transcoder_name(pch_transcoder));
}
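The %c-to-%s switch in these messages is a consequence of the new DSI transcoders: a name like "DSI A" no longer fits in a single character. A sketch of a transcoder_name()-style helper; the enum and strings are illustrative, not the driver's exact definitions:

#include <stdio.h>

enum transcoder { TRANS_A, TRANS_B, TRANS_C, TRANS_EDP, TRANS_DSI_A, TRANS_DSI_C };

static const char *transcoder_name(enum transcoder t)
{
    switch (t) {
    case TRANS_A:     return "A";
    case TRANS_B:     return "B";
    case TRANS_C:     return "C";
    case TRANS_EDP:   return "EDP";
    case TRANS_DSI_A: return "DSI A"; /* multi-char name: needs %s, not %c */
    case TRANS_DSI_C: return "DSI C";
    }
    return "?";
}

int main(void)
{
    printf("PCH transcoder %s FIFO underrun\n", transcoder_name(TRANS_DSI_A));
    return 0;
}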

@ -59,7 +59,7 @@
*
*/

#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin"
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);

/* User-friendly representation of an enum */
@ -81,14 +81,14 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)

static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
int i, irqs;
struct intel_engine_cs *engine;
int irqs;

/* tell all command streamers NOT to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(ring), irqs);
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MODE_GEN7(engine), irqs);

/* route all GT interrupts to the host */
I915_WRITE(GUC_BCS_RCS_IER, 0);
@ -98,14 +98,14 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)

static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
int i, irqs;
struct intel_engine_cs *engine;
int irqs;

/* tell all command streamers to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MODE_GEN7(ring), irqs);
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MODE_GEN7(engine), irqs);

/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
@ -595,8 +595,8 @@ void intel_guc_ucode_init(struct drm_device *dev)
fw_path = NULL;
} else if (IS_SKYLAKE(dev)) {
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = 4;
guc_fw->guc_fw_minor_wanted = 3;
guc_fw->guc_fw_major_wanted = 6;
guc_fw->guc_fw_minor_wanted = 1;
} else {
i915.enable_guc_submission = false;
fw_path = ""; /* unknown device */

@ -952,9 +952,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;

if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);

pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
File diff suppressed because it is too large

@ -57,8 +57,8 @@
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int intel_logical_rings_init(struct drm_device *dev);
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);

@ -98,18 +98,18 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)

void intel_lr_context_free(struct intel_context *ctx);
uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring);
struct intel_engine_cs *engine);
void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine);
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring);
struct intel_engine_cs *engine);

u32 intel_execlists_ctx_id(struct intel_context *ctx,
struct intel_engine_cs *ring);
struct intel_engine_cs *engine);

/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@ -118,7 +118,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);

void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
void intel_lrc_irq_handler(struct intel_engine_cs *engine);
void intel_execlists_retire_requests(struct intel_engine_cs *engine);

#endif /* _INTEL_LRC_H_ */

@ -109,7 +109,6 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
int dotclock;

tmp = I915_READ(lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
@ -130,12 +129,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}

dotclock = pipe_config->port_clock;

if (HAS_PCH_SPLIT(dev_priv->dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);

pipe_config->base.adjusted_mode.crtc_clock = dotclock;
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}

static void intel_pre_enable_lvds(struct intel_encoder *encoder)
@ -151,7 +145,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
if (HAS_PCH_SPLIT(dev)) {
assert_fdi_rx_pll_disabled(dev_priv, pipe);
assert_shared_dpll_disabled(dev_priv,
intel_crtc_to_shared_dpll(crtc));
crtc->config->shared_dpll);
} else {
assert_pll_disabled(dev_priv, pipe);
}
@ -781,57 +775,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
{ } /* terminating entry */
};

/*
* Enumerate the child dev array parsed from VBT to check whether
* the LVDS is present.
* If it is present, return 1.
* If it is not present, return false.
* If no child dev is parsed from VBT, it assumes that the LVDS is present.
*/
static bool lvds_is_present_in_vbt(struct drm_device *dev,
u8 *i2c_pin)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;

if (!dev_priv->vbt.child_dev_num)
return true;

for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
union child_device_config *uchild = dev_priv->vbt.child_dev + i;
struct old_child_dev_config *child = &uchild->old;

/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
* old for compatibility with some BIOSes.
*/
if (child->device_type != DEVICE_TYPE_INT_LFP &&
child->device_type != DEVICE_TYPE_LFP)
continue;

if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
*i2c_pin = child->i2c_pin;

/* However, we cannot trust the BIOS writers to populate
* the VBT correctly. Since LVDS requires additional
* information from AIM blocks, a non-zero addin offset is
* a good indicator that the LVDS is actually present.
*/
if (child->addin_offset)
return true;

/* But even then some BIOS writers perform some black magic
* and instantiate the device without reference to any
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
if (dev_priv->opregion.vbt)
return true;
}

return false;
}

static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
{
DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
@ -981,14 +924,14 @@ void intel_lvds_init(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
if (dev_priv->vbt.edp_support) {
if (dev_priv->vbt.edp.support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return;
}
}

pin = GMBUS_PIN_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;

@ -159,7 +159,7 @@ static bool get_mocs_settings(struct drm_device *dev,
return result;
}

static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
{
switch (ring) {
case RCS:
@ -191,7 +191,7 @@ static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
*/
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table,
enum intel_ring_id ring)
enum intel_engine_id ring)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
unsigned int index;
@ -322,14 +322,14 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
struct drm_i915_mocs_table t;
int ret;

if (get_mocs_settings(req->ring->dev, &t)) {
if (get_mocs_settings(req->engine->dev, &t)) {
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *ring;
enum intel_ring_id ring_id;
struct intel_engine_cs *engine;
enum intel_engine_id id;

/* Program the control registers */
for_each_ring(ring, dev_priv, ring_id) {
ret = emit_mocs_control_table(req, &t, ring_id);
for_each_engine_id(engine, dev_priv, id) {
ret = emit_mocs_control_table(req, &t, id);
if (ret)
return ret;
}

@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->gtt.mappable,
regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));

return regs;
@ -233,14 +233,14 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
int ret;

WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));

req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);

@ -252,11 +252,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)

overlay->active = true;

intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);

return intel_overlay_do_wait_request(overlay, req, NULL);
}
@ -267,7 +267,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@ -283,7 +283,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);

req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);

@ -293,9 +293,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
return ret;
}

intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(engine, flip_addr);
intel_ring_advance(engine);

WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req, req);
@ -336,7 +336,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
int ret;
@ -349,7 +349,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;

req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);

@ -360,22 +360,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
}

/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(engine, flip_addr);
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
if (IS_I830(dev)) {
/* Workaround: Don't disable the overlay fully, since otherwise
* it dies on the next OVERLAY_ON cmd. */
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_NOOP);
} else {
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
intel_ring_emit(engine, flip_addr);
intel_ring_emit(engine,
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
}
intel_ring_advance(ring);
intel_ring_advance(engine);

return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
}
@ -408,7 +409,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
int ret;

WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@ -423,7 +424,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
/* synchronous slowpath */
struct drm_i915_gem_request *req;

req = i915_gem_request_alloc(ring, NULL);
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);

@ -433,9 +434,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return ret;
}

intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
intel_ring_emit(engine,
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);

ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);
@ -1488,7 +1490,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));

return regs;

@ -1240,7 +1240,7 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
*/
static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
return KHz(19200) / pwm_freq_hz;
return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz);
}

/*
@ -1251,16 +1251,14 @@ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 mul, clock;
u32 mul;

if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
mul = 128;
else
mul = 16;

clock = MHz(24);

return clock / (pwm_freq_hz * mul);
return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul);
}

/*
@ -1283,7 +1281,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else
clock = MHz(24); /* LPT:LP */

return clock / (pwm_freq_hz * mul);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
}

/*
@ -1292,10 +1290,9 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
int clock = MHz(intel_pch_rawclk(dev));
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

return clock / (pwm_freq_hz * 128);
return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz * 128);
}

/*
@ -1308,16 +1305,15 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int clock;

if (IS_PINEVIEW(dev))
clock = MHz(intel_hrawclk(dev));
if (IS_PINEVIEW(dev_priv))
clock = KHz(dev_priv->rawclk_freq);
else
clock = 1000 * dev_priv->cdclk_freq;
clock = KHz(dev_priv->cdclk_freq);

return clock / (pwm_freq_hz * 32);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}

/*
@ -1332,11 +1328,11 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;

if (IS_G4X(dev_priv))
clock = MHz(intel_hrawclk(dev));
clock = KHz(dev_priv->rawclk_freq);
else
clock = 1000 * dev_priv->cdclk_freq;
clock = KHz(dev_priv->cdclk_freq);

return clock / (pwm_freq_hz * 128);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}

/*
@ -1346,19 +1342,21 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int clock;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int mul, clock;

if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
if (IS_CHERRYVIEW(dev))
return KHz(19200) / (pwm_freq_hz * 16);
if (IS_CHERRYVIEW(dev_priv))
clock = KHz(19200);
else
return MHz(25) / (pwm_freq_hz * 16);
clock = MHz(25);
mul = 16;
} else {
clock = intel_hrawclk(dev);
return MHz(clock) / (pwm_freq_hz * 128);
clock = KHz(dev_priv->rawclk_freq);
mul = 128;
}

return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
}
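Every hz_to_pwm helper above moves from truncating division to DIV_ROUND_CLOSEST, which halves the worst-case error in the programmed divider. A small comparison with illustrative numbers (the macro is restated here for a self-contained build; the kernel's version handles signed cases too):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
    unsigned int clock = 19200000; /* 19.2 MHz in Hz, illustrative */
    unsigned int pwm_freq_hz = 220;

    /* 19200000 / 220 = 87272.7...; truncation always rounds down */
    printf("truncated: %u\n", clock / pwm_freq_hz);                   /* 87272 */
    printf("closest:   %u\n", DIV_ROUND_CLOSEST(clock, pwm_freq_hz)); /* 87273 */
    return 0;
}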

static u32 get_backlight_max_vbt(struct intel_connector *connector)
@ -1745,7 +1743,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.get = pch_get_backlight;
panel->backlight.hz_to_pwm = pch_hz_to_pwm;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (dev_priv->vbt.has_mipi) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
panel->backlight.setup = pwm_setup_backlight;
panel->backlight.enable = pwm_enable_backlight;
panel->backlight.disable = pwm_disable_backlight;

@ -487,20 +487,6 @@ static const struct intel_watermark_params g4x_cursor_wm_info = {
.guard_size = 2,
.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
.fifo_size = VALLEYVIEW_FIFO_SIZE,
.max_wm = VALLEYVIEW_MAX_WM,
.default_wm = VALLEYVIEW_MAX_WM,
.guard_size = 2,
.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
.fifo_size = I965_CURSOR_FIFO,
.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
.default_wm = I965_CURSOR_DFT_WM,
.guard_size = 2,
.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
.fifo_size = I965_CURSOR_FIFO,
.max_wm = I965_CURSOR_MAX_WM,
@ -2010,11 +1996,18 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
cur_latency *= 5;
}

result->pri_val = ilk_compute_pri_wm(cstate, pristate,
pri_latency, level);
result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
if (pristate) {
result->pri_val = ilk_compute_pri_wm(cstate, pristate,
pri_latency, level);
result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
}

if (sprstate)
result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

if (curstate)
result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

result->enable = true;
}

@ -2278,99 +2271,170 @@ static void skl_setup_wm_latency(struct drm_device *dev)
intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}

/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
struct drm_atomic_state *state)
static bool ilk_validate_pipe_wm(struct drm_device *dev,
struct intel_pipe_wm *pipe_wm)
{
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = intel_crtc->base.dev;
const struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_state *cstate = NULL;
struct intel_plane *intel_plane;
struct drm_plane_state *ps;
struct intel_plane_state *pristate = NULL;
struct intel_plane_state *sprstate = NULL;
struct intel_plane_state *curstate = NULL;
int level, max_level = ilk_wm_max_level(dev);
/* LP0 watermark maximums depend on this pipe alone */
struct intel_wm_config config = {
const struct intel_wm_config config = {
.num_pipes_active = 1,
.sprites_enabled = pipe_wm->sprites_enabled,
.sprites_scaled = pipe_wm->sprites_scaled,
};
struct ilk_wm_maximums max;

cstate = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(cstate))
return PTR_ERR(cstate);

pipe_wm = &cstate->wm.optimal.ilk;
memset(pipe_wm, 0, sizeof(*pipe_wm));

for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
ps = drm_atomic_get_plane_state(state,
&intel_plane->base);
if (IS_ERR(ps))
return PTR_ERR(ps);

if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
pristate = to_intel_plane_state(ps);
else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
sprstate = to_intel_plane_state(ps);
else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
curstate = to_intel_plane_state(ps);
}

config.sprites_enabled = sprstate->visible;
config.sprites_scaled = sprstate->visible &&
(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);

pipe_wm->pipe_enabled = cstate->base.active;
pipe_wm->sprites_enabled = config.sprites_enabled;
pipe_wm->sprites_scaled = config.sprites_scaled;

/* ILK/SNB: LP2+ watermarks only w/o sprites */
if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible)
max_level = 1;

/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
if (config.sprites_scaled)
max_level = 0;

ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
pristate, sprstate, curstate, &pipe_wm->wm[0]);

if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);

/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

/* At least LP0 must be valid */
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
DRM_DEBUG_KMS("LP0 watermark invalid\n");
return false;
}

return true;
}
||||
|
||||
/* Compute new watermarks for the pipe */
|
||||
static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
|
||||
{
|
||||
struct drm_atomic_state *state = cstate->base.state;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
|
||||
struct intel_pipe_wm *pipe_wm;
|
||||
struct drm_device *dev = state->dev;
|
||||
const struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_plane *intel_plane;
|
||||
struct intel_plane_state *pristate = NULL;
|
||||
struct intel_plane_state *sprstate = NULL;
|
||||
struct intel_plane_state *curstate = NULL;
|
||||
int level, max_level = ilk_wm_max_level(dev), usable_level;
|
||||
struct ilk_wm_maximums max;
|
||||
|
||||
pipe_wm = &cstate->wm.optimal.ilk;
|
||||
|
||||
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
|
||||
struct intel_plane_state *ps;
|
||||
|
||||
ps = intel_atomic_get_existing_plane_state(state,
|
||||
intel_plane);
|
||||
if (!ps)
|
||||
continue;
|
||||
|
||||
if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
|
||||
pristate = ps;
|
||||
else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
|
||||
sprstate = ps;
|
||||
else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
|
||||
curstate = ps;
|
||||
}
|
||||
|
||||
pipe_wm->pipe_enabled = cstate->base.active;
|
||||
if (sprstate) {
|
||||
pipe_wm->sprites_enabled = sprstate->visible;
|
||||
pipe_wm->sprites_scaled = sprstate->visible &&
|
||||
(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
|
||||
drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
|
||||
}
|
||||
|
||||
usable_level = max_level;
|
||||
|
||||
/* ILK/SNB: LP2+ watermarks only w/o sprites */
|
||||
if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
|
||||
usable_level = 1;
|
||||
|
||||
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
|
||||
if (pipe_wm->sprites_scaled)
|
||||
usable_level = 0;
|
||||
|
||||
ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
|
||||
pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
|
||||
|
||||
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
|
||||
pipe_wm->wm[0] = pipe_wm->raw_wm[0];
|
||||
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
|
||||
pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
|
||||
|
||||
if (!ilk_validate_pipe_wm(dev, pipe_wm))
|
||||
return -EINVAL;
|
||||
|
||||
ilk_compute_wm_reg_maximums(dev, 1, &max);
|
||||
|
||||
for (level = 1; level <= max_level; level++) {
|
||||
struct intel_wm_level wm = {};
|
||||
struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
|
||||
|
||||
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
|
||||
pristate, sprstate, curstate, &wm);
|
||||
pristate, sprstate, curstate, wm);
|
||||
|
||||
/*
|
||||
* Disable any watermark level that exceeds the
|
||||
* register maximums since such watermarks are
|
||||
* always invalid.
|
||||
*/
|
||||
if (!ilk_validate_wm_level(level, &max, &wm))
|
||||
break;
|
||||
if (level > usable_level)
|
||||
continue;
|
||||
|
||||
pipe_wm->wm[level] = wm;
|
||||
if (ilk_validate_wm_level(level, &max, wm))
|
||||
pipe_wm->wm[level] = *wm;
|
||||
else
|
||||
usable_level = level;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
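The raw/active split introduced above is easier to see in isolation. Here is a toy, self-contained model of the level loop, with invented names and thresholds rather than driver code, showing how a failed validation disables that level while the raw values are still retained:

#include <stdio.h>

/* Toy model of the loop above: raw[] is always filled, but a level only
 * becomes active if it is <= usable_level and passes validation. All
 * names and the "<= 25" limit are made up for illustration. */
int main(void)
{
	int raw[5] = {0}, active[5] = {0}, usable_level = 4, level;

	for (level = 1; level <= 4; level++) {
		raw[level] = level * 10;      /* stand-in for ilk_compute_wm_level() */
		if (level > usable_level)
			continue;
		if (raw[level] <= 25)         /* stand-in for ilk_validate_wm_level() */
			active[level] = raw[level];
		else
			usable_level = level; /* this and higher levels stay disabled */
	}
	for (level = 1; level <= 4; level++)
		printf("level %d: raw=%d active=%d\n", level, raw[level], active[level]);
	return 0;
}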

/*
* Build a set of 'intermediate' watermark values that satisfy both the old
* state and the new state. These can be programmed to the hardware
* immediately.
*/
static int ilk_compute_intermediate_wm(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *newstate)
{
struct intel_pipe_wm *a = &newstate->wm.intermediate;
struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
int level, max_level = ilk_wm_max_level(dev);

/*
* Start with the final, target watermarks, then combine with the
* currently active watermarks to get values that are safe both before
* and after the vblank.
*/
*a = newstate->wm.optimal.ilk;
a->pipe_enabled |= b->pipe_enabled;
a->sprites_enabled |= b->sprites_enabled;
a->sprites_scaled |= b->sprites_scaled;

for (level = 0; level <= max_level; level++) {
struct intel_wm_level *a_wm = &a->wm[level];
const struct intel_wm_level *b_wm = &b->wm[level];

a_wm->enable &= b_wm->enable;
a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
}

/*
* We need to make sure that these merged watermark values are
* actually a valid configuration themselves. If they're not,
* there's no safe way to transition from the old state to
* the new state, so we need to fail the atomic transaction.
*/
if (!ilk_validate_pipe_wm(dev, a))
return -EINVAL;

/*
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0)
newstate->wm.need_postvbl_update = false;

return 0;
}
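The merge rule deserves a tiny standalone illustration: taking the maximum of the old and new values (and the logical AND of the enables) yields watermarks that are safe on both sides of the vblank. Field names below mimic struct intel_wm_level, but this is an invented example, not driver code:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

/* Toy illustration of the intermediate-watermark merge above. */
struct wm_level { int enable, pri_val, spr_val; };

int main(void)
{
	struct wm_level old = { 1, 40, 20 }, new = { 1, 32, 28 }, mid;

	mid.enable  = old.enable && new.enable;
	mid.pri_val = max(old.pri_val, new.pri_val);   /* 40 */
	mid.spr_val = max(old.spr_val, new.spr_val);   /* 28 */
	printf("intermediate: enable=%d pri=%d spr=%d\n",
	       mid.enable, mid.pri_val, mid.spr_val);
	return 0;
}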

/*
* Merge the watermarks from all active pipes for a specific level.
*/
@ -2383,9 +2447,7 @@ static void ilk_merge_wm_level(struct drm_device *dev,
ret_wm->enable = true;

for_each_intel_crtc(dev, intel_crtc) {
const struct intel_crtc_state *cstate =
to_intel_crtc_state(intel_crtc->base.state);
const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];

if (!active->pipe_enabled)
@ -2533,15 +2595,14 @@ static void ilk_compute_wm_results(struct drm_device *dev,

/* LP0 register values */
for_each_intel_crtc(dev, intel_crtc) {
const struct intel_crtc_state *cstate =
to_intel_crtc_state(intel_crtc->base.state);
enum pipe pipe = intel_crtc->pipe;
const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0];
const struct intel_wm_level *r =
&intel_crtc->wm.active.ilk.wm[0];

if (WARN_ON(!r->enable))
continue;

results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime;
results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;

results->wm_pipe[pipe] =
(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
@ -2748,7 +2809,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
dev_priv->wm.hw = *results;
}

static bool ilk_disable_lp_wm(struct drm_device *dev)
bool ilk_disable_lp_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

@ -3643,11 +3704,9 @@ static void ilk_compute_wm_config(struct drm_device *dev,
}
}

static void ilk_program_watermarks(struct intel_crtc_state *cstate)
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_device *dev = dev_priv->dev;
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
@ -3678,28 +3737,28 @@ static void ilk_program_watermarks(struct intel_crtc_state *cstate)
ilk_write_wm_values(dev_priv, &results);
}

static void ilk_update_wm(struct drm_crtc *crtc)
static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

WARN_ON(cstate->base.active != intel_crtc->active);
mutex_lock(&dev_priv->wm.wm_mutex);
intel_crtc->wm.active.ilk = cstate->wm.intermediate;
ilk_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}

/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
* when scaling is disabled.
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (cstate->disable_lp_wm) {
ilk_disable_lp_wm(crtc->dev);
intel_wait_for_vblank(crtc->dev, intel_crtc->pipe);
static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

mutex_lock(&dev_priv->wm.wm_mutex);
if (cstate->wm.need_postvbl_update) {
intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
ilk_program_watermarks(dev_priv);
}

intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;

ilk_program_watermarks(cstate);
mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void skl_pipe_wm_active_state(uint32_t val,
@ -4585,9 +4644,9 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
* for this check.
*/
rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
(rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
dev_priv->gtt.stolen_reserved_size))) {
if (!((rc6_ctx_base >= dev_priv->ggtt.stolen_reserved_base) &&
(rc6_ctx_base + PAGE_SIZE <= dev_priv->ggtt.stolen_reserved_base +
dev_priv->ggtt.stolen_reserved_size))) {
DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@ -4756,9 +4815,8 @@ static void gen9_enable_rps(struct drm_device *dev)
static void gen9_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
int unused;

/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@ -4779,8 +4837,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

if (HAS_GUC_UCODE(dev))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
@ -4826,9 +4884,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
int unused;

/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@ -4847,8 +4904,8 @@ static void gen8_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
@ -4908,11 +4965,11 @@ static void gen8_enable_rps(struct drm_device *dev)
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
int i, ret;
int ret;

WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

@ -4944,8 +5001,8 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
@ -5232,7 +5289,7 @@ static void cherryview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pctx_paddr, paddr;
struct i915_gtt *gtt = &dev_priv->gtt;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 pcbr;
int pctx_size = 32*1024;

@ -5240,7 +5297,7 @@ static void cherryview_setup_pctx(struct drm_device *dev)
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
paddr = (dev_priv->mm.stolen_base +
(gtt->stolen_size - pctx_size));
(ggtt->stolen_size - pctx_size));

pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
@ -5436,9 +5493,8 @@ static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
static void cherryview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
int i;

WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

@ -5463,8 +5519,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);

/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
@ -5534,9 +5590,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0;
int i;

WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

@ -5574,8 +5629,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

@ -5951,17 +6006,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
bool ret = false;
int i;

spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;

for_each_ring(ring, dev_priv, i)
ret |= !list_empty(&ring->request_list);
for_each_engine(engine, dev_priv)
ret |= !list_empty(&engine->request_list);

out_unlock:
spin_unlock_irq(&mchdev_lock);
@ -7038,8 +7092,7 @@ void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if (dev_priv->display.init_clock_gating)
dev_priv->display.init_clock_gating(dev);
dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
@ -7048,6 +7101,60 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}

static void nop_init_clock_gating(struct drm_device *dev)
{
DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
* intel_init_clock_gating_hooks - setup the clock gating hooks
* @dev_priv: device private
*
* Setup the hooks that configure which clocks of a given platform can be
* gated and also apply various GT and display specific workarounds for these
* platforms. Note that some GT specific workarounds are applied separately
* when GPU contexts or batchbuffers start their execution.
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_SKYLAKE(dev_priv))
dev_priv->display.init_clock_gating = nop_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
dev_priv->display.init_clock_gating = nop_init_clock_gating;
else if (IS_BROXTON(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
else if (IS_CHERRYVIEW(dev_priv))
dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
else if (IS_HASWELL(dev_priv))
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
else if (IS_IVYBRIDGE(dev_priv))
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
else if (IS_VALLEYVIEW(dev_priv))
dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
else if (IS_GEN6(dev_priv))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
else if (IS_GEN5(dev_priv))
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
else if (IS_CRESTLINE(dev_priv))
dev_priv->display.init_clock_gating = crestline_init_clock_gating;
else if (IS_BROADWATER(dev_priv))
dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
else if (IS_GEN3(dev_priv))
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
else if (IS_GEN2(dev_priv))
dev_priv->display.init_clock_gating = i830_init_clock_gating;
else {
MISSING_CASE(INTEL_DEVID(dev_priv));
dev_priv->display.init_clock_gating = nop_init_clock_gating;
}
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
@ -7064,10 +7171,6 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);

if (IS_BROXTON(dev))
dev_priv->display.init_clock_gating =
bxt_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
} else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
@ -7076,36 +7179,23 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
(!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.program_watermarks = ilk_program_watermarks;
dev_priv->display.compute_intermediate_wm =
ilk_compute_intermediate_wm;
dev_priv->display.initial_watermarks =
ilk_initial_watermarks;
dev_priv->display.optimize_watermarks =
ilk_optimize_watermarks;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
}

if (IS_GEN5(dev))
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
else if (IS_GEN6(dev))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
else if (IS_IVYBRIDGE(dev))
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
else if (IS_HASWELL(dev))
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
vlv_setup_wm_latency(dev);

dev_priv->display.update_wm = vlv_update_wm;
dev_priv->display.init_clock_gating =
cherryview_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
vlv_setup_wm_latency(dev);

dev_priv->display.update_wm = vlv_update_wm;
dev_priv->display.init_clock_gating =
valleyview_init_clock_gating;
} else if (IS_PINEVIEW(dev)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
dev_priv->is_ddr3,
@ -7121,20 +7211,13 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_G4X(dev)) {
dev_priv->display.update_wm = g4x_update_wm;
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
} else if (IS_GEN4(dev)) {
dev_priv->display.update_wm = i965_update_wm;
if (IS_CRESTLINE(dev))
dev_priv->display.init_clock_gating = crestline_init_clock_gating;
else if (IS_BROADWATER(dev))
dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
} else if (IS_GEN3(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_GEN2(dev)) {
if (INTEL_INFO(dev)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
@ -7143,11 +7226,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}

if (IS_I85X(dev) || IS_I865G(dev))
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
else
dev_priv->display.init_clock_gating = i830_init_clock_gating;
} else {
DRM_ERROR("unexpected fall-through in intel_init_pm\n");
}
@ -7302,7 +7380,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
struct drm_i915_gem_request *req = boost->req;

if (!i915_gem_request_completed(req, true))
gen6_rps_boost(to_i915(req->ring->dev), NULL,
gen6_rps_boost(to_i915(req->engine->dev), NULL,
req->emitted_jiffies);

i915_gem_request_unreference__unlocked(req);
@ -507,7 +507,8 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)

/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
EDP_PSR_STATUS_STATE_MASK) == 0,
2 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
DRM_ERROR("Timed out waiting for PSR Idle State\n");

dev_priv->psr.active = false;
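The rewritten timeout appears to keep the same effective values while expressing them in microseconds, assuming _wait_for() in this tree now takes its timeout and poll interval in that unit (an inference from the constants, not something this hunk states). The arithmetic, as a trivial check:

#include <stdio.h>

/* Unit sanity check for the timeout rewrite above; the USEC_PER_*
 * values match <linux/time.h>. Illustrative only. */
#define USEC_PER_SEC  1000000L
#define USEC_PER_MSEC 1000L

int main(void)
{
	printf("%ld us (= 2 s), %ld us (= 10 ms)\n",
	       2 * USEC_PER_SEC, 10 * USEC_PER_MSEC);
	return 0;
}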
@ -780,8 +781,7 @@ void intel_psr_init(struct drm_device *dev)

/* Per platform default */
if (i915.enable_psr == -1) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
i915.enable_psr = 1;
else
i915.enable_psr = 0;
(File diff suppressed because it is too large.)
@ -55,31 +55,30 @@ struct intel_hw_status_page {
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
((__ring)->id * I915_NUM_ENGINES * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
((from) * I915_NUM_ENGINES * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (__ring)->id))

#define GEN8_RING_SEMAPHORE_INIT do { \
#define GEN8_RING_SEMAPHORE_INIT(e) do { \
if (!dev_priv->semaphore_obj) { \
break; \
} \
ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
(e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
(e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
(e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
(e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
(e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
(e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
} while(0)
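The point of parameterizing the macro is that it no longer silently captures a local variable named ring. A toy, compilable illustration of the same do/while(0) pattern follows; the names are invented and unrelated to the real macro's dev_priv and GGTT offsets:

#include <stdio.h>

/* Toy demonstration: a do/while(0) macro that names its target
 * explicitly can be applied to any instance, while the old form could
 * only touch a variable literally called "ring". */
struct engine { int id; int signal[5]; };

#define ENGINE_SEMAPHORE_INIT(e) do {      \
	for (int i = 0; i < 5; i++)        \
		(e)->signal[i] = i;        \
	(e)->signal[(e)->id] = -1;         /* no self-signalling */ \
} while (0)

int main(void)
{
	struct engine vcs = { .id = 2 };

	ENGINE_SEMAPHORE_INIT(&vcs);
	for (int i = 0; i < 5; i++)
		printf("signal[%d] = %d\n", i, vcs.signal[i]);
	return 0;
}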

enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
HANGCHECK_ACTIVE_LOOP,
HANGCHECK_KICK,
HANGCHECK_HUNG,
};
@ -88,7 +87,6 @@ enum intel_ring_hangcheck_action {

struct intel_ring_hangcheck {
u64 acthd;
u64 max_acthd;
u32 seqno;
int score;
enum intel_ring_hangcheck_action action;
@ -101,7 +99,7 @@ struct intel_ringbuffer {
void __iomem *virtual_start;
struct i915_vma *vma;

struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct list_head link;

u32 head;
@ -125,7 +123,7 @@ struct intel_ringbuffer {
};

struct intel_context;
struct drm_i915_reg_descriptor;
struct drm_i915_reg_table;

/*
* we use a single page to load ctx workarounds so all of these
@ -148,14 +146,14 @@ struct i915_ctx_workarounds {

struct intel_engine_cs {
const char *name;
enum intel_ring_id {
enum intel_engine_id {
RCS = 0,
BCS,
VCS,
VCS2, /* Keep instances of the same type engine together. */
VECS
} id;
#define I915_NUM_RINGS 5
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
unsigned int guc_id;
@ -246,16 +244,16 @@ struct intel_engine_cs {
* ie. transpose of f(x, y)
*/
struct {
u32 sync_seqno[I915_NUM_RINGS-1];
u32 sync_seqno[I915_NUM_ENGINES-1];

union {
struct {
/* our mbox written by others */
u32 wait[I915_NUM_RINGS];
u32 wait[I915_NUM_ENGINES];
/* mboxes this ring signals to */
i915_reg_t signal[I915_NUM_RINGS];
i915_reg_t signal[I915_NUM_ENGINES];
} mbox;
u64 signal_ggtt[I915_NUM_RINGS];
u64 signal_ggtt[I915_NUM_ENGINES];
};

/* AKA wait() */
@ -271,7 +269,8 @@ struct intel_engine_cs {
spinlock_t execlist_lock;
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
unsigned int next_context_status_buffer;
unsigned int idle_lite_restore_wa;
bool disable_lite_restore_wa;
u32 ctx_desc_template;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
@ -332,15 +331,8 @@ struct intel_engine_cs {
/*
* Table of registers allowed in commands that read/write registers.
*/
const struct drm_i915_reg_descriptor *reg_table;
int reg_count;

/*
* Table of registers allowed in commands that read/write registers, but
* only from the DRM master.
*/
const struct drm_i915_reg_descriptor *master_reg_table;
int master_reg_count;
const struct drm_i915_reg_table *reg_tables;
int reg_table_count;

/*
* Returns the bitmask for the length field of the specified command.
@ -356,19 +348,19 @@ struct intel_engine_cs {
};

static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
intel_engine_initialized(struct intel_engine_cs *engine)
{
return ring->dev != NULL;
return engine->dev != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
intel_engine_flag(struct intel_engine_cs *engine)
{
return 1 << ring->id;
return 1 << engine->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
intel_ring_sync_index(struct intel_engine_cs *engine,
struct intel_engine_cs *other)
{
int idx;
@ -381,34 +373,34 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/

idx = (other - ring) - 1;
idx = (other - engine) - 1;
if (idx < 0)
idx += I915_NUM_RINGS;
idx += I915_NUM_ENGINES;

return idx;
}
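The wrap-around arithmetic is compact enough to check by hand. A self-contained toy version follows, where plain array positions stand in for the engine pointers; this is illustrative, not driver code:

#include <stdio.h>

#define NUM_ENGINES 5

/* Toy version of the index computation above: the sync index of
 * "other" relative to "engine" is their distance around the engine
 * array, minus one, modulo the engine count. */
static int sync_index(int engine, int other)
{
	int idx = (other - engine) - 1;

	if (idx < 0)
		idx += NUM_ENGINES;
	return idx;
}

int main(void)
{
	/* Positions follow the new enum: rcs=0, bcs=1, vcs=2, vcs2=3, vecs=4. */
	printf("rcs->vcs:  %d\n", sync_index(0, 2)); /* 1 */
	printf("vecs->rcs: %d\n", sync_index(4, 0)); /* 0 */
	return 0;
}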

static inline void
intel_flush_status_page(struct intel_engine_cs *ring, int reg)
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
drm_clflush_virt_range(&ring->status_page.page_addr[reg],
drm_clflush_virt_range(&engine->status_page.page_addr[reg],
sizeof(uint32_t));
}

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
intel_read_status_page(struct intel_engine_cs *engine,
int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
barrier();
return ring->status_page.page_addr[reg];
return engine->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
intel_write_status_page(struct intel_engine_cs *engine,
int reg, u32 value)
{
ring->status_page.page_addr[reg] = value;
engine->status_page.page_addr[reg] = value;
}

/*
@ -439,42 +431,42 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
void intel_stop_engine(struct intel_engine_cs *engine);
void intel_cleanup_engine(struct intel_engine_cs *engine);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
static inline void intel_ring_emit(struct intel_engine_cs *engine,
u32 data)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct intel_ringbuffer *ringbuf = engine->buffer;
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
i915_reg_t reg)
{
intel_ring_emit(ring, i915_mmio_reg_offset(reg));
intel_ring_emit(engine, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
static inline void intel_ring_advance(struct intel_engine_cs *engine)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct intel_ringbuffer *ringbuf = engine->buffer;
ringbuf->tail &= ringbuf->size - 1;
}
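These inlines are used in a begin/emit/advance pattern: intel_ring_begin() reserves dwords on the request's ring, intel_ring_emit() writes them at the tail, and intel_ring_advance() wraps the tail back into the power-of-two buffer. A runnable userspace toy of the tail handling, with invented names rather than driver code:

#include <stdio.h>
#include <stdint.h>

/* Toy ring-buffer emit/advance mirroring the tail handling above:
 * tail is in bytes, the buffer size is a power of two, and advance
 * wraps the tail with a mask. */
struct ring { uint32_t buf[8]; uint32_t tail, size; };

static void ring_emit(struct ring *r, uint32_t data)
{
	r->buf[r->tail / 4] = data;   /* 4 bytes per dword */
	r->tail += 4;
}

static void ring_advance(struct ring *r)
{
	r->tail &= r->size - 1;       /* size is a power of two, in bytes */
}

int main(void)
{
	struct ring r = { .tail = 28, .size = 32 };

	ring_emit(&r, 0xdeadbeef);    /* tail: 28 -> 32 */
	ring_advance(&r);             /* tail wraps to 0 */
	printf("tail = %u\n", r.tail);
	return 0;
}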
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
bool intel_engine_stopped(struct intel_engine_cs *engine);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);
void intel_fini_pipe_control(struct intel_engine_cs *engine);
int intel_init_pipe_control(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
@ -482,9 +474,9 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
u64 intel_ring_get_active_head(struct intel_engine_cs *engine);

int init_workarounds_ring(struct intel_engine_cs *ring);
int init_workarounds_ring(struct intel_engine_cs *engine);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
@ -89,6 +89,10 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
case POWER_DOMAIN_TRANSCODER_DSI_A:
return "TRANSCODER_DSI_A";
case POWER_DOMAIN_TRANSCODER_DSI_C:
return "TRANSCODER_DSI_C";
case POWER_DOMAIN_PORT_DDI_A_LANES:
return "PORT_DDI_A_LANES";
case POWER_DOMAIN_PORT_DDI_B_LANES:
@ -419,8 +423,11 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_TRANSCODER_DSI_A) | \
BIT(POWER_DOMAIN_TRANSCODER_DSI_C) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
BIT(POWER_DOMAIN_PORT_DSI) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
@ -458,8 +465,6 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
"DC9 already programmed to be disabled.\n");
WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled.\n");

@ -472,24 +477,6 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
*/
}

static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
uint32_t val, mask;

mask = DC_STATE_DEBUG_MASK_MEMORY_UP;

if (IS_BROXTON(dev_priv))
mask |= DC_STATE_DEBUG_MASK_CORES;

/* The below bit doesn't need to be cleared ever afterwards */
val = I915_READ(DC_STATE_DEBUG);
if ((val & mask) != mask) {
val |= mask;
I915_WRITE(DC_STATE_DEBUG, val);
POSTING_READ(DC_STATE_DEBUG);
}
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
u32 state)
{
@ -538,12 +525,8 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
else
mask |= DC_STATE_EN_UPTO_DC6;

WARN_ON_ONCE(state & ~mask);

if (i915.enable_dc == 0)
state = DC_STATE_DISABLE;
else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
state = DC_STATE_EN_UPTO_DC5;
if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
state &= dev_priv->csr.allowed_dc_mask;

val = I915_READ(DC_STATE_EN);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
@ -606,18 +589,6 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
/*
* During initialization, the firmware may not be loaded yet.
* We still want to make sure that the DC enabling flag is cleared.
*/
if (dev_priv->power_domains.initializing)
return;

assert_rpm_wakelock_held(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc5(dev_priv);
@ -642,30 +613,6 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
/*
* During initialization, the firmware may not be loaded yet.
* We still want to make sure that the DC enabling flag is cleared.
*/
if (dev_priv->power_domains.initializing)
return;

WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be disabled.\n");
}

static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc5(dev_priv);

if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 && i915.enable_dc != 1)
assert_can_disable_dc6(dev_priv);

gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
@ -678,8 +625,6 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc6(dev_priv);

DRM_DEBUG_KMS("Disabling DC6\n");

gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@ -833,32 +778,25 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
gen9_disable_dc5_dc6(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 && i915.enable_dc != 1)
if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(dev_priv);
else
else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(dev_priv);
}

static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (power_well->count > 0) {
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
} else {
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 &&
i915.enable_dc != 1)
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
else
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
if (power_well->count > 0)
gen9_dc_off_power_well_enable(dev_priv, power_well);
else
gen9_dc_off_power_well_disable(dev_priv, power_well);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
@ -2023,6 +1961,55 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
return 1;
}

static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int enable_dc)
{
uint32_t mask;
int requested_dc;
int max_dc;

if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
max_dc = 2;
mask = 0;
} else if (IS_BROXTON(dev_priv)) {
max_dc = 1;
/*
* DC9 has a separate HW flow from the rest of the DC states,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
mask = DC_STATE_EN_DC9;
} else {
max_dc = 0;
mask = 0;
}

if (!i915.disable_power_well)
max_dc = 0;

if (enable_dc >= 0 && enable_dc <= max_dc) {
requested_dc = enable_dc;
} else if (enable_dc == -1) {
requested_dc = max_dc;
} else if (enable_dc > max_dc && enable_dc <= 2) {
DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
enable_dc, max_dc);
requested_dc = max_dc;
} else {
DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
requested_dc = max_dc;
}

if (requested_dc > 1)
mask |= DC_STATE_EN_UPTO_DC6;
if (requested_dc > 0)
mask |= DC_STATE_EN_UPTO_DC5;

DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

return mask;
}
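A condensed model of the mask computation can make the policy easier to follow. This toy version uses invented bit values and omits the BXT DC9 case and the out-of-range clamping, so treat it as a sketch of the happy paths only:

#include <stdio.h>

/* Invented bit values; the real ones live in i915_reg.h. */
#define DC_STATE_EN_UPTO_DC5 (1 << 0)
#define DC_STATE_EN_UPTO_DC6 (1 << 1)

static unsigned int allowed_dc_mask(int max_dc, int enable_dc)
{
	int requested_dc = (enable_dc >= 0 && enable_dc <= max_dc) ?
			   enable_dc : max_dc;
	unsigned int mask = 0;

	if (requested_dc > 1)
		mask |= DC_STATE_EN_UPTO_DC6;
	if (requested_dc > 0)
		mask |= DC_STATE_EN_UPTO_DC5;
	return mask;
}

int main(void)
{
	printf("skl, auto: %#x\n", allowed_dc_mask(2, -1)); /* 0x3 */
	printf("skl, dc=1: %#x\n", allowed_dc_mask(2, 1));  /* 0x1 */
	printf("no dc:     %#x\n", allowed_dc_mask(0, -1)); /* 0x0 */
	return 0;
}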

#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@ -2041,6 +2028,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)

i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
i915.disable_power_well);
dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
i915.enable_dc);

BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

@ -2141,8 +2130,8 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,

skl_init_cdclk(dev_priv);

if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
gen9_set_dc_state_debugmask(dev_priv);
if (dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
@ -1398,12 +1398,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}

dotclock = pipe_config->port_clock;

if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;

if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);

pipe_config->base.adjusted_mode.crtc_clock = dotclock;

/* Cross check the port pixel multiplier with the sdvo encoder state. */
@ -2262,9 +2260,9 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping;

if (sdvo->port == PORT_B)
mapping = &(dev_priv->sdvo_mappings[0]);
mapping = &dev_priv->vbt.sdvo_mappings[0];
else
mapping = &(dev_priv->sdvo_mappings[1]);
mapping = &dev_priv->vbt.sdvo_mappings[1];

if (mapping->initialized)
sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
@ -2280,9 +2278,9 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
u8 pin;

if (sdvo->port == PORT_B)
mapping = &dev_priv->sdvo_mappings[0];
mapping = &dev_priv->vbt.sdvo_mappings[0];
else
mapping = &dev_priv->sdvo_mappings[1];
mapping = &dev_priv->vbt.sdvo_mappings[1];

if (mapping->initialized &&
intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
@ -2318,11 +2316,11 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
struct sdvo_device_mapping *my_mapping, *other_mapping;

if (sdvo->port == PORT_B) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
my_mapping = &dev_priv->vbt.sdvo_mappings[0];
other_mapping = &dev_priv->vbt.sdvo_mappings[1];
} else {
my_mapping = &dev_priv->sdvo_mappings[1];
other_mapping = &dev_priv->sdvo_mappings[0];
my_mapping = &dev_priv->vbt.sdvo_mappings[1];
other_mapping = &dev_priv->vbt.sdvo_mappings[0];
}

/* If the BIOS described our SDVO device, take advantage of it. */
@ -193,7 +193,7 @@ skl_update_plane(struct drm_plane *drm_plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
unsigned int rotation = plane_state->base.rotation;
int x_offset, y_offset;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
@ -213,7 +213,6 @@ skl_update_plane(struct drm_plane *drm_plane,
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);

rotation = plane_state->base.rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);

stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
@ -351,6 +350,7 @@ vlv_update_plane(struct drm_plane *dplane,
int plane = intel_plane->plane;
u32 sprctl;
u32 sprsurf_offset, linear_offset;
unsigned int rotation = dplane->state->rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
@ -423,12 +423,11 @@ vlv_update_plane(struct drm_plane *dplane,
crtc_h--;

linear_offset = y * fb->pitches[0] + x * cpp;
sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0], cpp,
fb->pitches[0]);
sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
fb->pitches[0], rotation);
linear_offset -= sprsurf_offset;

if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
if (rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;

x += src_w;
@ -493,6 +492,7 @@ ivb_update_plane(struct drm_plane *plane,
enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
u32 sprsurf_offset, linear_offset;
unsigned int rotation = plane_state->base.rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
@ -556,12 +556,11 @@ ivb_update_plane(struct drm_plane *plane,
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;

linear_offset = y * fb->pitches[0] + x * cpp;
sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0], cpp,
fb->pitches[0]);
sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
fb->pitches[0], rotation);
linear_offset -= sprsurf_offset;

if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
if (rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;

/* HSW and BDW does this automagically in hardware */
@ -634,6 +633,7 @@ ilk_update_plane(struct drm_plane *plane,
int pipe = intel_plane->pipe;
u32 dvscntr, dvsscale;
u32 dvssurf_offset, linear_offset;
unsigned int rotation = plane_state->base.rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
@ -693,12 +693,11 @@ ilk_update_plane(struct drm_plane *plane,
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;

linear_offset = y * fb->pitches[0] + x * cpp;
dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0], cpp,
fb->pitches[0]);
dvssurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
fb->pitches[0], rotation);
linear_offset -= dvssurf_offset;

if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
if (rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;

x += src_w;
@ -326,24 +326,12 @@ static const struct color_conversion sdtv_csc_yprpb = {
.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
};

static const struct color_conversion sdtv_csc_rgb = {
.ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
.ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
.rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
};

static const struct color_conversion hdtv_csc_yprpb = {
.ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
.ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
};

static const struct color_conversion hdtv_csc_rgb = {
.ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
.ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
.rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
};

static const struct video_levels component_levels = {
.blank = 279, .black = 279, .burst = 0,
};
@ -1531,47 +1519,6 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
.destroy = intel_encoder_destroy,
};

/*
* Enumerate the child dev array parsed from VBT to check whether
* the integrated TV is present.
* If it is present, return 1.
* If it is not present, return false.
* If no child dev is parsed from VBT, it assumes that the TV is present.
*/
static int tv_is_present_in_vbt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
union child_device_config *p_child;
int i, ret;

if (!dev_priv->vbt.child_dev_num)
return 1;

ret = 0;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
/*
* If the device type is not TV, continue.
*/
switch (p_child->old.device_type) {
case DEVICE_TYPE_INT_TV:
case DEVICE_TYPE_TV:
case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
break;
default:
continue;
}
/* Only when the addin_offset is non-zero, it is regarded
* as present.
*/
if (p_child->old.addin_offset) {
ret = 1;
break;
}
}
return ret;
}

void
intel_tv_init(struct drm_device *dev)
{
@ -1587,13 +1534,10 @@ intel_tv_init(struct drm_device *dev)
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;

if (!tv_is_present_in_vbt(dev)) {
if (!intel_bios_is_tv_present(dev_priv)) {
DRM_DEBUG_KMS("Integrated TV is not present.\n");
return;
}
/* Even if we have an encoder we may not have a connector */
if (!dev_priv->vbt.int_tv_support)
return;

/*
* Sanity check the TV output by checking to see if the
@ -1433,7 +1433,7 @@ static int i915_reset_complete(struct drm_device *dev)
|
||||
return (gdrst & GRDOM_RESET_STATUS) == 0;
|
||||
}
|
||||
|
||||
static int i915_do_reset(struct drm_device *dev)
|
||||
static int i915_do_reset(struct drm_device *dev, unsigned engine_mask)
|
||||
{
|
||||
/* assert reset for at least 20 usec */
|
||||
pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
|
||||
@ -1450,13 +1450,13 @@ static int g4x_reset_complete(struct drm_device *dev)
|
||||
return (gdrst & GRDOM_RESET_ENABLE) == 0;
|
||||
}
|
||||
|
||||
static int g33_do_reset(struct drm_device *dev)
|
||||
static int g33_do_reset(struct drm_device *dev, unsigned engine_mask)
|
||||
{
|
||||
pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
|
||||
return wait_for(g4x_reset_complete(dev), 500);
|
||||
}
|
||||
|
||||
static int g4x_do_reset(struct drm_device *dev)
|
||||
static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
@ -1486,7 +1486,7 @@ static int g4x_do_reset(struct drm_device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ironlake_do_reset(struct drm_device *dev)
|
||||
static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
@ -1510,75 +1510,132 @@ static int ironlake_do_reset(struct drm_device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gen6_do_reset(struct drm_device *dev)
|
||||
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
|
||||
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
|
||||
u32 hw_domain_mask)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
|
||||
/* Reset the chip */
|
||||
int ret;
|
||||
|
||||
/* GEN6_GDRST is not in the gt power well, no need to check
|
||||
* for fifo space for the write or forcewake the chip for
|
||||
* the read
|
||||
*/
|
||||
__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
|
||||
__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
|
||||
|
||||
/* Spin waiting for the device to ack the reset request */
|
||||
ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
|
||||
#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
|
||||
/* Spin waiting for the device to ack the reset requests */
|
||||
ret = wait_for(ACKED, 500);
|
||||
#undef ACKED
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* gen6_reset_engines - reset individual engines
|
||||
* @dev: DRM device
|
||||
* @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
|
||||
*
|
||||
* This function will reset the individual engines that are set in engine_mask.
|
||||
* If you provide ALL_ENGINES as mask, full global domain reset will be issued.
|
||||
*
|
||||
* Note: It is responsibility of the caller to handle the difference between
|
||||
* asking full domain reset versus reset for all available individual engines.
|
||||
*
|
||||
* Returns 0 on success, nonzero on error.
|
||||
*/
|
||||
static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *engine;
|
||||
const u32 hw_engine_mask[I915_NUM_ENGINES] = {
|
||||
[RCS] = GEN6_GRDOM_RENDER,
|
||||
[BCS] = GEN6_GRDOM_BLT,
|
||||
[VCS] = GEN6_GRDOM_MEDIA,
|
||||
[VCS2] = GEN8_GRDOM_MEDIA2,
|
||||
[VECS] = GEN6_GRDOM_VECS,
|
||||
};
|
||||
u32 hw_mask;
|
||||
int ret;
|
||||
|
||||
if (engine_mask == ALL_ENGINES) {
|
||||
hw_mask = GEN6_GRDOM_FULL;
|
||||
} else {
|
||||
hw_mask = 0;
|
||||
for_each_engine_masked(engine, dev_priv, engine_mask)
|
||||
hw_mask |= hw_engine_mask[engine->id];
|
||||
}
|
||||
|
||||
ret = gen6_hw_domain_reset(dev_priv, hw_mask);
|
||||
|
||||
intel_uncore_forcewake_reset(dev, true);
|
||||
|
||||
return ret;
|
||||
}
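/*
 * [Editor's sketch, not part of the commit] To make the new engine_mask
 * semantics concrete: intel_ring_flag(engine) is this kernel's
 * 1 << engine->id bit, and ALL_ENGINES selects the full-domain path above.
 * The example_* helper names below are hypothetical, for illustration only.
 */
static int example_reset_render_only(struct drm_device *dev,
				     struct intel_engine_cs *render)
{
	/* resets only the render engine's GRDOM hardware domain */
	return intel_gpu_reset(dev, intel_ring_flag(render));
}

static int example_reset_all(struct drm_device *dev)
{
	/* ALL_ENGINES is mapped to GEN6_GRDOM_FULL in gen6_reset_engines() */
	return intel_gpu_reset(dev, ALL_ENGINES);
}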
 
-static int wait_for_register(struct drm_i915_private *dev_priv,
-			     i915_reg_t reg,
-			     const u32 mask,
-			     const u32 value,
-			     const unsigned long timeout_ms)
+static int wait_for_register_fw(struct drm_i915_private *dev_priv,
+				i915_reg_t reg,
+				const u32 mask,
+				const u32 value,
+				const unsigned long timeout_ms)
 {
-	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+	return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
 }
 
-static int gen8_do_reset(struct drm_device *dev)
+static int gen8_request_engine_reset(struct intel_engine_cs *engine)
+{
+	int ret;
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+	ret = wait_for_register_fw(dev_priv,
+				   RING_RESET_CTL(engine->mmio_base),
+				   RESET_CTL_READY_TO_RESET,
+				   RESET_CTL_READY_TO_RESET,
+				   700);
+	if (ret)
+		DRM_ERROR("%s: reset request timeout\n", engine->name);
+
+	return ret;
+}
+
+static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+}
+
+static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
-	int i;
-
-	for_each_ring(engine, dev_priv, i) {
-		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
-			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
-
-		if (wait_for_register(dev_priv,
-				      RING_RESET_CTL(engine->mmio_base),
-				      RESET_CTL_READY_TO_RESET,
-				      RESET_CTL_READY_TO_RESET,
-				      700)) {
-			DRM_ERROR("%s: reset request timeout\n", engine->name);
+	for_each_engine_masked(engine, dev_priv, engine_mask)
+		if (gen8_request_engine_reset(engine))
 			goto not_ready;
-		}
-	}
 
-	return gen6_do_reset(dev);
+	return gen6_reset_engines(dev, engine_mask);
 
 not_ready:
-	for_each_ring(engine, dev_priv, i)
-		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
-			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+	for_each_engine_masked(engine, dev_priv, engine_mask)
+		gen8_unrequest_engine_reset(engine);
 
 	return -EIO;
 }
 
-static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *,
+							  unsigned engine_mask)
 {
 	if (!i915.reset)
 		return NULL;
 
 	if (INTEL_INFO(dev)->gen >= 8)
-		return gen8_do_reset;
+		return gen8_reset_engines;
 	else if (INTEL_INFO(dev)->gen >= 6)
-		return gen6_do_reset;
+		return gen6_reset_engines;
 	else if (IS_GEN5(dev))
 		return ironlake_do_reset;
 	else if (IS_G4X(dev))
@@ -1591,10 +1648,10 @@ static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
 		return NULL;
 }
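/*
 * [Editor's note] The declarator above reads inside-out: intel_get_gpu_reset()
 * is a function taking a drm_device pointer and returning a pointer to a
 * function of type int (*)(struct drm_device *, unsigned). A sketch of the
 * same signature spelled with a typedef (the name reset_func is ours, not
 * the commit's):
 *
 *	typedef int (*reset_func)(struct drm_device *dev, unsigned engine_mask);
 *	static reset_func intel_get_gpu_reset(struct drm_device *dev);
 */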
 
-int intel_gpu_reset(struct drm_device *dev)
+int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int (*reset)(struct drm_device *);
+	int (*reset)(struct drm_device *, unsigned);
 	int ret;
 
 	reset = intel_get_gpu_reset(dev);
@@ -1605,7 +1662,7 @@ int intel_gpu_reset(struct drm_device *dev)
 	 * request may be dropped and never completes (causing -EIO).
 	 */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-	ret = reset(dev);
+	ret = reset(dev, engine_mask);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
 	return ret;
826	drivers/gpu/drm/i915/intel_vbt_defs.h	Normal file
@@ -0,0 +1,826 @@
/*
 * Copyright © 2006-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *
 */

/*
 * This information is private to VBT parsing in intel_bios.c.
 *
 * Please do NOT include anywhere else.
 */
#ifndef _INTEL_BIOS_PRIVATE
#error "intel_vbt_defs.h is private to intel_bios.c"
#endif

#ifndef _INTEL_VBT_DEFS_H_
#define _INTEL_VBT_DEFS_H_

#include "intel_bios.h"

/**
 * struct vbt_header - VBT Header structure
 * @signature:		VBT signature, always starts with "$VBT"
 * @version:		Version of this structure
 * @header_size:	Size of this structure
 * @vbt_size:		Size of VBT (VBT Header, BDB Header and data blocks)
 * @vbt_checksum:	Checksum
 * @reserved0:		Reserved
 * @bdb_offset:		Offset of &struct bdb_header from beginning of VBT
 * @aim_offset:		Offsets of add-in data blocks from beginning of VBT
 */
struct vbt_header {
	u8 signature[20];
	u16 version;
	u16 header_size;
	u16 vbt_size;
	u8 vbt_checksum;
	u8 reserved0;
	u32 bdb_offset;
	u32 aim_offset[4];
} __packed;

/**
 * struct bdb_header - BDB Header structure
 * @signature:		BDB signature "BIOS_DATA_BLOCK"
 * @version:		Version of the data block definitions
 * @header_size:	Size of this structure
 * @bdb_size:		Size of BDB (BDB Header and data blocks)
 */
struct bdb_header {
	u8 signature[16];
	u16 version;
	u16 header_size;
	u16 bdb_size;
} __packed;
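/*
 * [Editor's sketch] A parser locates the BDB by validating the "$VBT"
 * signature and following vbt_header->bdb_offset; intel_bios.c performs the
 * equivalent with more validation. The helper name and the memcmp-based
 * check below are illustrative assumptions, not code from this commit.
 */
static inline const struct bdb_header *
example_get_bdb_header(const void *vbt_blob, size_t size)
{
	const struct vbt_header *vbt = vbt_blob;

	if (size < sizeof(*vbt) || memcmp(vbt->signature, "$VBT", 4) != 0)
		return NULL;

	if (vbt->bdb_offset > size - sizeof(struct bdb_header))
		return NULL;

	return (const struct bdb_header *)((const u8 *)vbt_blob +
					   vbt->bdb_offset);
}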

/* strictly speaking, this is a "skip" block, but it has interesting info */
struct vbios_data {
	u8 type; /* 0 == desktop, 1 == mobile */
	u8 relstage;
	u8 chipset;
	u8 lvds_present:1;
	u8 tv_present:1;
	u8 rsvd2:6; /* finish byte */
	u8 rsvd3[4];
	u8 signon[155];
	u8 copyright[61];
	u16 code_segment;
	u8 dos_boot_mode;
	u8 bandwidth_percent;
	u8 rsvd4; /* popup memory size */
	u8 resize_pci_bios;
	u8 rsvd5; /* is crt already on ddc2 */
} __packed;

/*
 * There are several types of BIOS data blocks (BDBs), each block has
 * an ID and size in the first 3 bytes (ID in first, size in next 2).
 * Known types are listed below.
 */
#define BDB_GENERAL_FEATURES	  1
#define BDB_GENERAL_DEFINITIONS	  2
#define BDB_OLD_TOGGLE_LIST	  3
#define BDB_MODE_SUPPORT_LIST	  4
#define BDB_GENERIC_MODE_TABLE	  5
#define BDB_EXT_MMIO_REGS	  6
#define BDB_SWF_IO		  7
#define BDB_SWF_MMIO		  8
#define BDB_PSR			  9
#define BDB_MODE_REMOVAL_TABLE	 10
#define BDB_CHILD_DEVICE_TABLE	 11
#define BDB_DRIVER_FEATURES	 12
#define BDB_DRIVER_PERSISTENCE	 13
#define BDB_EXT_TABLE_PTRS	 14
#define BDB_DOT_CLOCK_OVERRIDE	 15
#define BDB_DISPLAY_SELECT	 16
/* 17 rsvd */
#define BDB_DRIVER_ROTATION	 18
#define BDB_DISPLAY_REMOVE	 19
#define BDB_OEM_CUSTOM		 20
#define BDB_EFP_LIST		 21 /* workarounds for VGA hsync/vsync */
#define BDB_SDVO_LVDS_OPTIONS	 22
#define BDB_SDVO_PANEL_DTDS	 23
#define BDB_SDVO_LVDS_PNP_IDS	 24
#define BDB_SDVO_LVDS_POWER_SEQ	 25
#define BDB_TV_OPTIONS		 26
#define BDB_EDP			 27
#define BDB_LVDS_OPTIONS	 40
#define BDB_LVDS_LFP_DATA_PTRS	 41
#define BDB_LVDS_LFP_DATA	 42
#define BDB_LVDS_BACKLIGHT	 43
#define BDB_LVDS_POWER		 44
#define BDB_MIPI_CONFIG		 52
#define BDB_MIPI_SEQUENCE	 53
#define BDB_SKIP		254 /* VBIOS private block, ignore */
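/*
 * [Editor's sketch] Because every block starts with a 1-byte ID and a 2-byte
 * little-endian size, a block with a given ID can be found by walking the
 * blocks in order; find_section() in intel_bios.c does essentially this,
 * with stricter bounds checking. The helper name is illustrative only.
 */
static const void *
example_find_block(const struct bdb_header *bdb, int section_id)
{
	const u8 *base = (const u8 *)bdb;
	u32 offset = bdb->header_size;	/* data blocks follow the header */

	while (offset + 3 <= bdb->bdb_size) {
		u8 id = base[offset];
		u16 len = base[offset + 1] | (base[offset + 2] << 8);

		if (id == section_id)
			return base + offset + 3;	/* skip ID and size */

		offset += 3 + len;
	}

	return NULL;
}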

struct bdb_general_features {
        /* bits 1 */
	u8 panel_fitting:2;
	u8 flexaim:1;
	u8 msg_enable:1;
	u8 clear_screen:3;
	u8 color_flip:1;

        /* bits 2 */
	u8 download_ext_vbt:1;
	u8 enable_ssc:1;
	u8 ssc_freq:1;
	u8 enable_lfp_on_override:1;
	u8 disable_ssc_ddt:1;
	u8 rsvd7:1;
	u8 display_clock_mode:1;
	u8 rsvd8:1; /* finish byte */

        /* bits 3 */
	u8 disable_smooth_vision:1;
	u8 single_dvi:1;
	u8 rsvd9:1;
	u8 fdi_rx_polarity_inverted:1;
	u8 rsvd10:4; /* finish byte */

        /* bits 4 */
	u8 legacy_monitor_detect;

        /* bits 5 */
	u8 int_crt_support:1;
	u8 int_tv_support:1;
	u8 int_efp_support:1;
	u8 dp_ssc_enb:1;	/* PCH attached eDP supports SSC */
	u8 dp_ssc_freq:1;	/* SSC freq for PCH attached eDP */
	u8 rsvd11:3; /* finish byte */
} __packed;

/* pre-915 */
#define GPIO_PIN_DVI_LVDS	0x03 /* "DVI/LVDS DDC GPIO pins" */
#define GPIO_PIN_ADD_I2C	0x05 /* "ADDCARD I2C GPIO pins" */
#define GPIO_PIN_ADD_DDC	0x04 /* "ADDCARD DDC GPIO pins" */
#define GPIO_PIN_ADD_DDC_I2C	0x06 /* "ADDCARD DDC/I2C GPIO pins" */

/* Pre 915 */
#define DEVICE_TYPE_NONE	0x00
#define DEVICE_TYPE_CRT		0x01
#define DEVICE_TYPE_TV		0x09
#define DEVICE_TYPE_EFP		0x12
#define DEVICE_TYPE_LFP		0x22
/* On 915+ */
#define DEVICE_TYPE_CRT_DPMS		0x6001
#define DEVICE_TYPE_CRT_DPMS_HOTPLUG	0x4001
#define DEVICE_TYPE_TV_COMPOSITE	0x0209
#define DEVICE_TYPE_TV_MACROVISION	0x0289
#define DEVICE_TYPE_TV_RF_COMPOSITE	0x020c
#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE	0x0609
#define DEVICE_TYPE_TV_SCART		0x0209
#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
#define DEVICE_TYPE_EFP_HOTPLUG_PWR	0x6012
#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR	0x6052
#define DEVICE_TYPE_EFP_DVI_I		0x6053
#define DEVICE_TYPE_EFP_DVI_D_DUAL	0x6152
#define DEVICE_TYPE_EFP_DVI_D_HDCP	0x60d2
#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR	0x6062
#define DEVICE_TYPE_OPENLDI_DUALPIX	0x6162
#define DEVICE_TYPE_LFP_PANELLINK	0x5012
#define DEVICE_TYPE_LFP_CMOS_PWR	0x5042
#define DEVICE_TYPE_LFP_LVDS_PWR	0x5062
#define DEVICE_TYPE_LFP_LVDS_DUAL	0x5162
#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP	0x51e2

#define DEVICE_CFG_NONE		0x00
#define DEVICE_CFG_12BIT_DVOB	0x01
#define DEVICE_CFG_12BIT_DVOC	0x02
#define DEVICE_CFG_24BIT_DVOBC	0x09
#define DEVICE_CFG_24BIT_DVOCB	0x0a
#define DEVICE_CFG_DUAL_DVOB	0x11
#define DEVICE_CFG_DUAL_DVOC	0x12
#define DEVICE_CFG_DUAL_DVOBC	0x13
#define DEVICE_CFG_DUAL_LINK_DVOBC	0x19
#define DEVICE_CFG_DUAL_LINK_DVOCB	0x1a

#define DEVICE_WIRE_NONE	0x00
#define DEVICE_WIRE_DVOB	0x01
#define DEVICE_WIRE_DVOC	0x02
#define DEVICE_WIRE_DVOBC	0x03
#define DEVICE_WIRE_DVOBB	0x05
#define DEVICE_WIRE_DVOCC	0x06
#define DEVICE_WIRE_DVOB_MASTER	0x0d
#define DEVICE_WIRE_DVOC_MASTER	0x0e

#define DEVICE_PORT_DVOA	0x00 /* none on 845+ */
#define DEVICE_PORT_DVOB	0x01
#define DEVICE_PORT_DVOC	0x02

/*
 * We used to keep this struct but without any version control. We should avoid
 * using it in the future, but it should be safe to keep using it in the old
 * code. Do not change; we rely on its size.
 */
struct old_child_dev_config {
	u16 handle;
	u16 device_type;
	u8  device_id[10]; /* ascii string */
	u16 addin_offset;
	u8  dvo_port; /* See Device_PORT_* above */
	u8  i2c_pin;
	u8  slave_addr;
	u8  ddc_pin;
	u16 edid_ptr;
	u8  dvo_cfg; /* See DEVICE_CFG_* above */
	u8  dvo2_port;
	u8  i2c2_pin;
	u8  slave2_addr;
	u8  ddc2_pin;
	u8  capabilities;
	u8  dvo_wiring; /* See DEVICE_WIRE_* above */
	u8  dvo2_wiring;
	u16 extended_type;
	u8  dvo_function;
} __packed;

/* This one contains field offsets that are known to be common for all BDB
 * versions. Notice that the meaning of the contents may still change,
 * but at least the offsets are consistent. */

/* Definitions for flags_1 */
#define IBOOST_ENABLE (1<<3)

struct common_child_dev_config {
	u16 handle;
	u16 device_type;
	u8 not_common1[12];
	u8 dvo_port;
	u8 not_common2[2];
	u8 ddc_pin;
	u16 edid_ptr;
	u8 obsolete;
	u8 flags_1;
	u8 not_common3[13];
	u8 iboost_level;
} __packed;


/* This field changes depending on the BDB version, so the most reliable way to
 * read it is by checking the BDB version and reading the raw pointer. */
union child_device_config {
	/* This one is safe to be used anywhere, but the code should still check
	 * the BDB version. */
	u8 raw[33];
	/* This one should only be kept for legacy code. */
	struct old_child_dev_config old;
	/* This one should also be safe to use anywhere, even without version
	 * checks. */
	struct common_child_dev_config common;
} __packed;

struct bdb_general_definitions {
	/* DDC GPIO */
	u8 crt_ddc_gmbus_pin;

	/* DPMS bits */
	u8 dpms_acpi:1;
	u8 skip_boot_crt_detect:1;
	u8 dpms_aim:1;
	u8 rsvd1:5; /* finish byte */

	/* boot device bits */
	u8 boot_display[2];
	u8 child_dev_size;

	/*
	 * Device info:
	 * If TV is present, it'll be at devices[0].
	 * LVDS will be next, either devices[0] or [1], if present.
	 * On some platforms the number of devices is 6, but it could be as
	 * few as 4 if both TV and LVDS are missing.
	 * The device count depends on the size of the general definitions
	 * block; it is obtained with the following formula:
	 * number = (block_size - sizeof(bdb_general_definitions))/
	 *	     defs->child_dev_size;
	 */
	uint8_t devices[0];
} __packed;
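/*
 * [Editor's sketch] The formula in the comment above, as code. block_size is
 * the size read from the BDB block header for BDB_GENERAL_DEFINITIONS. The
 * fixed part of the struct is 5 bytes, so a 203-byte block with 33-byte
 * child entries holds (203 - 5) / 33 = 6 children. Helper name is
 * illustrative only.
 */
static int example_child_device_count(const struct bdb_general_definitions *defs,
				      u16 block_size)
{
	if (!defs->child_dev_size)
		return 0;

	return (block_size - sizeof(*defs)) / defs->child_dev_size;
}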

/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
#define MODE_MASK		0x3

struct bdb_lvds_options {
	u8 panel_type;
	u8 rsvd1;
	/* LVDS capabilities, stored in a dword */
	u8 pfit_mode:2;
	u8 pfit_text_mode_enhanced:1;
	u8 pfit_gfx_mode_enhanced:1;
	u8 pfit_ratio_auto:1;
	u8 pixel_dither:1;
	u8 lvds_edid:1;
	u8 rsvd2:1;
	u8 rsvd4;
	/* LVDS Panel channel bits stored here */
	u32 lvds_panel_channel_bits;
	/* LVDS SSC (Spread Spectrum Clock) bits stored here. */
	u16 ssc_bits;
	u16 ssc_freq;
	u16 ssc_ddt;
	/* Panel color depth defined here */
	u16 panel_color_depth;
	/* LVDS panel type bits stored here */
	u32 dps_panel_type_bits;
	/* LVDS backlight control type bits stored here */
	u32 blt_control_type_bits;
} __packed;

/* LFP pointer table contains entries to the struct below */
struct bdb_lvds_lfp_data_ptr {
	u16 fp_timing_offset; /* offsets are from start of bdb */
	u8 fp_table_size;
	u16 dvo_timing_offset;
	u8 dvo_table_size;
	u16 panel_pnp_id_offset;
	u8 pnp_table_size;
} __packed;

struct bdb_lvds_lfp_data_ptrs {
	u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
	struct bdb_lvds_lfp_data_ptr ptr[16];
} __packed;

/* LFP data has 3 blocks per entry */
struct lvds_fp_timing {
	u16 x_res;
	u16 y_res;
	u32 lvds_reg;
	u32 lvds_reg_val;
	u32 pp_on_reg;
	u32 pp_on_reg_val;
	u32 pp_off_reg;
	u32 pp_off_reg_val;
	u32 pp_cycle_reg;
	u32 pp_cycle_reg_val;
	u32 pfit_reg;
	u32 pfit_reg_val;
	u16 terminator;
} __packed;

struct lvds_dvo_timing {
	u16 clock;		/**< In 10khz */
	u8 hactive_lo;
	u8 hblank_lo;
	u8 hblank_hi:4;
	u8 hactive_hi:4;
	u8 vactive_lo;
	u8 vblank_lo;
	u8 vblank_hi:4;
	u8 vactive_hi:4;
	u8 hsync_off_lo;
	u8 hsync_pulse_width;
	u8 vsync_pulse_width:4;
	u8 vsync_off:4;
	u8 rsvd0:6;
	u8 hsync_off_hi:2;
	u8 h_image;
	u8 v_image;
	u8 max_hv;
	u8 h_border;
	u8 v_border;
	u8 rsvd1:3;
	u8 digital:2;
	u8 vsync_positive:1;
	u8 hsync_positive:1;
	u8 rsvd2:1;
} __packed;
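/*
 * [Editor's sketch] The *_lo/*_hi pairs above have to be stitched back
 * together when building a display mode; intel_bios.c does the equivalent
 * when it fills in panel fixed modes. Helper name is illustrative only.
 */
static void example_decode_dvo_timing(const struct lvds_dvo_timing *t,
				      u32 *clock_khz, u16 *hactive,
				      u16 *vactive)
{
	*clock_khz = t->clock * 10;	/* clock field is in 10 kHz units */
	*hactive = (t->hactive_hi << 8) | t->hactive_lo;
	*vactive = (t->vactive_hi << 8) | t->vactive_lo;
}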

struct lvds_pnp_id {
	u16 mfg_name;
	u16 product_code;
	u32 serial;
	u8 mfg_week;
	u8 mfg_year;
} __packed;

struct bdb_lvds_lfp_data_entry {
	struct lvds_fp_timing fp_timing;
	struct lvds_dvo_timing dvo_timing;
	struct lvds_pnp_id pnp_id;
} __packed;

struct bdb_lvds_lfp_data {
	struct bdb_lvds_lfp_data_entry data[16];
} __packed;

#define BDB_BACKLIGHT_TYPE_NONE	0
#define BDB_BACKLIGHT_TYPE_PWM	2

struct bdb_lfp_backlight_data_entry {
	u8 type:2;
	u8 active_low_pwm:1;
	u8 obsolete1:5;
	u16 pwm_freq_hz;
	u8 min_brightness;
	u8 obsolete2;
	u8 obsolete3;
} __packed;

struct bdb_lfp_backlight_data {
	u8 entry_size;
	struct bdb_lfp_backlight_data_entry data[16];
	u8 level[16];
} __packed;

struct aimdb_header {
	char signature[16];
	char oem_device[20];
	u16 aimdb_version;
	u16 aimdb_header_size;
	u16 aimdb_size;
} __packed;

struct aimdb_block {
	u8 aimdb_id;
	u16 aimdb_size;
} __packed;

struct vch_panel_data {
	u16 fp_timing_offset;
	u8 fp_timing_size;
	u16 dvo_timing_offset;
	u8 dvo_timing_size;
	u16 text_fitting_offset;
	u8 text_fitting_size;
	u16 graphics_fitting_offset;
	u8 graphics_fitting_size;
} __packed;

struct vch_bdb_22 {
	struct aimdb_block aimdb_block;
	struct vch_panel_data panels[16];
} __packed;

struct bdb_sdvo_lvds_options {
	u8 panel_backlight;
	u8 h40_set_panel_type;
	u8 panel_type;
	u8 ssc_clk_freq;
	u16 als_low_trip;
	u16 als_high_trip;
	u8 sclalarcoeff_tab_row_num;
	u8 sclalarcoeff_tab_row_size;
	u8 coefficient[8];
	u8 panel_misc_bits_1;
	u8 panel_misc_bits_2;
	u8 panel_misc_bits_3;
	u8 panel_misc_bits_4;
} __packed;


#define BDB_DRIVER_FEATURE_NO_LVDS	0
#define BDB_DRIVER_FEATURE_INT_LVDS	1
#define BDB_DRIVER_FEATURE_SDVO_LVDS	2
#define BDB_DRIVER_FEATURE_EDP		3

struct bdb_driver_features {
	u8 boot_dev_algorithm:1;
	u8 block_display_switch:1;
	u8 allow_display_switch:1;
	u8 hotplug_dvo:1;
	u8 dual_view_zoom:1;
	u8 int15h_hook:1;
	u8 sprite_in_clone:1;
	u8 primary_lfp_id:1;

	u16 boot_mode_x;
	u16 boot_mode_y;
	u8 boot_mode_bpp;
	u8 boot_mode_refresh;

	u16 enable_lfp_primary:1;
	u16 selective_mode_pruning:1;
	u16 dual_frequency:1;
	u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
	u16 nt_clone_support:1;
	u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
	u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
	u16 cui_aspect_scaling:1;
	u16 preserve_aspect_ratio:1;
	u16 sdvo_device_power_down:1;
	u16 crt_hotplug:1;
	u16 lvds_config:2;
	u16 tv_hotplug:1;
	u16 hdmi_config:2;

	u8 static_display:1;
	u8 reserved2:7;
	u16 legacy_crt_max_x;
	u16 legacy_crt_max_y;
	u8 legacy_crt_max_refresh;

	u8 hdmi_termination;
	u8 custom_vbt_version;
	/* Driver features data block */
	u16 rmpm_enabled:1;
	u16 s2ddt_enabled:1;
	u16 dpst_enabled:1;
	u16 bltclt_enabled:1;
	u16 adb_enabled:1;
	u16 drrs_enabled:1;
	u16 grs_enabled:1;
	u16 gpmt_enabled:1;
	u16 tbt_enabled:1;
	u16 psr_enabled:1;
	u16 ips_enabled:1;
	u16 reserved3:4;
	u16 pc_feature_valid:1;
} __packed;

#define EDP_18BPP	0
#define EDP_24BPP	1
#define EDP_30BPP	2
#define EDP_RATE_1_62	0
#define EDP_RATE_2_7	1
#define EDP_LANE_1	0
#define EDP_LANE_2	1
#define EDP_LANE_4	3
#define EDP_PREEMPHASIS_NONE	0
#define EDP_PREEMPHASIS_3_5dB	1
#define EDP_PREEMPHASIS_6dB	2
#define EDP_PREEMPHASIS_9_5dB	3
#define EDP_VSWING_0_4V		0
#define EDP_VSWING_0_6V		1
#define EDP_VSWING_0_8V		2
#define EDP_VSWING_1_2V		3


struct edp_link_params {
	u8 rate:4;
	u8 lanes:4;
	u8 preemphasis:4;
	u8 vswing:4;
} __packed;
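/*
 * [Editor's sketch] Decoding the nibble-sized fields above with the EDP_*
 * values; the kHz link rates follow the DisplayPort 1.62/2.7 Gbps rates
 * that the EDP_RATE_* names refer to. Helper name is illustrative only.
 */
static void example_decode_edp_link(const struct edp_link_params *p,
				    int *clock_khz, int *lanes)
{
	*clock_khz = (p->rate == EDP_RATE_2_7) ? 270000 : 162000;

	switch (p->lanes) {
	case EDP_LANE_1: *lanes = 1; break;
	case EDP_LANE_2: *lanes = 2; break;
	default:	 *lanes = 4; break;	/* EDP_LANE_4 */
	}
}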

struct bdb_edp {
	struct edp_power_seq power_seqs[16];
	u32 color_depth;
	struct edp_link_params link_params[16];
	u32 sdrrs_msa_timing_delay;

	/* ith bit indicates enabled/disabled for (i+1)th panel */
	u16 edp_s3d_feature;
	u16 edp_t3_optimization;
	u64 edp_vswing_preemph; /* v173 */
} __packed;

struct psr_table {
	/* Feature bits */
	u8 full_link:1;
	u8 require_aux_to_wakeup:1;
	u8 feature_bits_rsvd:6;

	/* Wait times */
	u8 idle_frames:4;
	u8 lines_to_wait:3;
	u8 wait_times_rsvd:1;

	/* TP wake up time in multiple of 100 */
	u16 tp1_wakeup_time;
	u16 tp2_tp3_wakeup_time;
} __packed;

struct bdb_psr {
	struct psr_table psr_table[16];
} __packed;

/*
 * Driver<->VBIOS interaction occurs through scratch bits in
 * GR18 & SWF*.
 */

/* GR18 bits are set on display switch and hotkey events */
#define GR18_DRIVER_SWITCH_EN	(1<<7) /* 0: VBIOS control, 1: driver control */
#define GR18_HOTKEY_MASK	0x78 /* See also SWF4 15:0 */
#define   GR18_HK_NONE		(0x0<<3)
#define   GR18_HK_LFP_STRETCH	(0x1<<3)
#define   GR18_HK_TOGGLE_DISP	(0x2<<3)
#define   GR18_HK_DISP_SWITCH	(0x4<<3) /* see SWF14 15:0 for what to enable */
#define   GR18_HK_POPUP_DISABLED (0x6<<3)
#define   GR18_HK_POPUP_ENABLED	(0x7<<3)
#define   GR18_HK_PFIT		(0x8<<3)
#define   GR18_HK_APM_CHANGE	(0xa<<3)
#define   GR18_HK_MULTIPLE	(0xc<<3)
#define GR18_USER_INT_EN	(1<<2)
#define GR18_A0000_FLUSH_EN	(1<<1)
#define GR18_SMM_EN		(1<<0)

/* Set by driver, cleared by VBIOS */
#define SWF00_YRES_SHIFT	16
#define SWF00_XRES_SHIFT	0
#define SWF00_RES_MASK		0xffff

/* Set by VBIOS at boot time and driver at runtime */
#define SWF01_TV2_FORMAT_SHIFT	8
#define SWF01_TV1_FORMAT_SHIFT	0
#define SWF01_TV_FORMAT_MASK	0xffff

#define SWF10_VBIOS_BLC_I2C_EN	(1<<29)
#define SWF10_GTT_OVERRIDE_EN	(1<<28)
#define SWF10_LFP_DPMS_OVR	(1<<27) /* override DPMS on display switch */
#define SWF10_ACTIVE_TOGGLE_LIST_MASK	(7<<24)
#define   SWF10_OLD_TOGGLE	0x0
#define   SWF10_TOGGLE_LIST_1	0x1
#define   SWF10_TOGGLE_LIST_2	0x2
#define   SWF10_TOGGLE_LIST_3	0x3
#define   SWF10_TOGGLE_LIST_4	0x4
#define SWF10_PANNING_EN	(1<<23)
#define SWF10_DRIVER_LOADED	(1<<22)
#define SWF10_EXTENDED_DESKTOP	(1<<21)
#define SWF10_EXCLUSIVE_MODE	(1<<20)
#define SWF10_OVERLAY_EN	(1<<19)
#define SWF10_PLANEB_HOLDOFF	(1<<18)
#define SWF10_PLANEA_HOLDOFF	(1<<17)
#define SWF10_VGA_HOLDOFF	(1<<16)
#define SWF10_ACTIVE_DISP_MASK	0xffff
#define SWF10_PIPEB_LFP2	(1<<15)
#define SWF10_PIPEB_EFP2	(1<<14)
#define SWF10_PIPEB_TV2		(1<<13)
#define SWF10_PIPEB_CRT2	(1<<12)
#define SWF10_PIPEB_LFP		(1<<11)
#define SWF10_PIPEB_EFP		(1<<10)
#define SWF10_PIPEB_TV		(1<<9)
#define SWF10_PIPEB_CRT		(1<<8)
#define SWF10_PIPEA_LFP2	(1<<7)
#define SWF10_PIPEA_EFP2	(1<<6)
#define SWF10_PIPEA_TV2		(1<<5)
#define SWF10_PIPEA_CRT2	(1<<4)
#define SWF10_PIPEA_LFP		(1<<3)
#define SWF10_PIPEA_EFP		(1<<2)
#define SWF10_PIPEA_TV		(1<<1)
#define SWF10_PIPEA_CRT		(1<<0)

#define SWF11_MEMORY_SIZE_SHIFT	16
#define SWF11_SV_TEST_EN	(1<<15)
#define SWF11_IS_AGP		(1<<14)
#define SWF11_DISPLAY_HOLDOFF	(1<<13)
#define SWF11_DPMS_REDUCED	(1<<12)
#define SWF11_IS_VBE_MODE	(1<<11)
#define SWF11_PIPEB_ACCESS	(1<<10) /* 0 here means pipe a */
#define SWF11_DPMS_MASK		0x07
#define   SWF11_DPMS_OFF	(1<<2)
#define   SWF11_DPMS_SUSPEND	(1<<1)
#define   SWF11_DPMS_STANDBY	(1<<0)
#define   SWF11_DPMS_ON		0

#define SWF14_GFX_PFIT_EN	(1<<31)
#define SWF14_TEXT_PFIT_EN	(1<<30)
#define SWF14_LID_STATUS_CLOSED	(1<<29) /* 0 here means open */
#define SWF14_POPUP_EN		(1<<28)
#define SWF14_DISPLAY_HOLDOFF	(1<<27)
#define SWF14_DISP_DETECT_EN	(1<<26)
#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
#define SWF14_DRIVER_STATUS	(1<<24)
#define SWF14_OS_TYPE_WIN9X	(1<<23)
#define SWF14_OS_TYPE_WINNT	(1<<22)
/* 21:19 rsvd */
#define SWF14_PM_TYPE_MASK	0x00070000
#define   SWF14_PM_ACPI_VIDEO	(0x4 << 16)
#define   SWF14_PM_ACPI		(0x3 << 16)
#define   SWF14_PM_APM_12	(0x2 << 16)
#define   SWF14_PM_APM_11	(0x1 << 16)
#define SWF14_HK_REQUEST_MASK	0x0000ffff /* see GR18 6:3 for event type */
/* if GR18 indicates a display switch */
#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
#define   SWF14_DS_PIPEB_TV_EN   (1<<9)
#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
#define   SWF14_DS_PIPEA_TV_EN   (1<<1)
#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
/* if GR18 indicates a panel fitting request */
#define   SWF14_PFIT_EN		(1<<0) /* 0 means disable */
/* if GR18 indicates an APM change request */
#define   SWF14_APM_HIBERNATE	0x4
#define   SWF14_APM_SUSPEND	0x3
#define   SWF14_APM_STANDBY	0x1
#define   SWF14_APM_RESTORE	0x0

/* Add the device class for LFP, TV, HDMI */
#define DEVICE_TYPE_INT_LFP	0x1022
#define DEVICE_TYPE_INT_TV	0x1009
#define DEVICE_TYPE_HDMI	0x60D2
#define DEVICE_TYPE_DP		0x68C6
#define DEVICE_TYPE_eDP		0x78C6

#define DEVICE_TYPE_CLASS_EXTENSION	(1 << 15)
#define DEVICE_TYPE_POWER_MANAGEMENT	(1 << 14)
#define DEVICE_TYPE_HOTPLUG_SIGNALING	(1 << 13)
#define DEVICE_TYPE_INTERNAL_CONNECTOR	(1 << 12)
#define DEVICE_TYPE_NOT_HDMI_OUTPUT	(1 << 11)
#define DEVICE_TYPE_MIPI_OUTPUT		(1 << 10)
#define DEVICE_TYPE_COMPOSITE_OUTPUT	(1 << 9)
#define DEVICE_TYPE_DUAL_CHANNEL	(1 << 8)
#define DEVICE_TYPE_HIGH_SPEED_LINK	(1 << 6)
#define DEVICE_TYPE_LVDS_SINGALING	(1 << 5)
#define DEVICE_TYPE_TMDS_DVI_SIGNALING	(1 << 4)
#define DEVICE_TYPE_VIDEO_SIGNALING	(1 << 3)
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT	(1 << 2)
#define DEVICE_TYPE_DIGITAL_OUTPUT	(1 << 1)
#define DEVICE_TYPE_ANALOG_OUTPUT	(1 << 0)

/*
 * Bits we care about when checking for DEVICE_TYPE_eDP
 * Depending on the system, the other bits may or may not
 * be set for eDP outputs.
 */
#define DEVICE_TYPE_eDP_BITS \
	(DEVICE_TYPE_INTERNAL_CONNECTOR | \
	 DEVICE_TYPE_MIPI_OUTPUT | \
	 DEVICE_TYPE_COMPOSITE_OUTPUT | \
	 DEVICE_TYPE_DUAL_CHANNEL | \
	 DEVICE_TYPE_LVDS_SINGALING | \
	 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
	 DEVICE_TYPE_VIDEO_SIGNALING | \
	 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
	 DEVICE_TYPE_ANALOG_OUTPUT)
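/*
 * [Editor's sketch] The mask is meant for a masked compare that ignores the
 * bits it does not name; intel_bios.c's eDP detection is the equivalent of
 * the check below. Helper name is illustrative only.
 */
static inline bool example_is_edp_device_type(u16 device_type)
{
	return (device_type & DEVICE_TYPE_eDP_BITS) ==
	       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
}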

/* define the DVO port for HDMI output type */
#define		DVO_B		1
#define		DVO_C		2
#define		DVO_D		3

/* Possible values for the "DVO Port" field for versions >= 155: */
#define DVO_PORT_HDMIA	0
#define DVO_PORT_HDMIB	1
#define DVO_PORT_HDMIC	2
#define DVO_PORT_HDMID	3
#define DVO_PORT_LVDS	4
#define DVO_PORT_TV	5
#define DVO_PORT_CRT	6
#define DVO_PORT_DPB	7
#define DVO_PORT_DPC	8
#define DVO_PORT_DPD	9
#define DVO_PORT_DPA	10
#define DVO_PORT_DPE	11
#define DVO_PORT_HDMIE	12
#define DVO_PORT_MIPIA	21
#define DVO_PORT_MIPIB	22
#define DVO_PORT_MIPIC	23
#define DVO_PORT_MIPID	24

/* Block 52 contains the MIPI configuration block:
 * 6 mipi_config entries followed by 6 pps data blocks,
 * laid out as below
 */
#define MAX_MIPI_CONFIGURATIONS	6

struct bdb_mipi_config {
	struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
	struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
} __packed;

/* Block 53 contains the MIPI sequences needed by the panel
 * for enabling it. This block is variable in size and
 * holds a maximum of 6 sequence blocks
 */
struct bdb_mipi_sequence {
	u8 version;
	u8 data[0];
} __packed;

enum mipi_gpio_pin_index {
	MIPI_GPIO_UNDEFINED = 0,
	MIPI_GPIO_PANEL_ENABLE,
	MIPI_GPIO_BL_ENABLE,
	MIPI_GPIO_PWM_ENABLE,
	MIPI_GPIO_RESET_N,
	MIPI_GPIO_PWR_DOWN_R,
	MIPI_GPIO_STDBY_RST_N,
	MIPI_GPIO_MAX
};

#endif /* _INTEL_VBT_DEFS_H_ */