Merge tag 'drm-intel-next-2014-09-01' of git://anongit.freedesktop.org/drm-intel into drm-next
drm-intel-next-2014-08-22:
- basic code for execlist, which is the fancy new cmd submission on gen8. Still disabled by default (Ben, Oscar Mateo, Thomas Daniel et al)
- remove the useless usage of console_lock for I915_FBDEV=n (Chris)
- clean up relations between ctx and ppgtt
- clean up ppgtt lifetime handling (Michel Thierry)
- various cursor code improvements from Ville
- execbuffer code cleanups and secure batch fixes (Chris)
- prep work for dev -> dev_priv transition (Chris)
- some of the prep patches for the seqno -> request object transition (Chris)
- various small improvements all over

* tag 'drm-intel-next-2014-09-01' of git://anongit.freedesktop.org/drm-intel: (86 commits)
  drm/i915: fix suspend/resume for GENs w/o runtime PM support
  drm/i915: Update DRIVER_DATE to 20140822
  drm: fix plane rotation when restoring fbdev configuration
  drm/i915/bdw: Disable execlists by default
  drm/i915/bdw: Enable Logical Ring Contexts (hence, Execlists)
  drm/i915/bdw: Document Logical Rings, LR contexts and Execlists
  drm/i915/bdw: Print context state in debugfs
  drm/i915/bdw: Display context backing obj & ringbuffer info in debugfs
  drm/i915/bdw: Display execlists info in debugfs
  drm/i915/bdw: Disable semaphores for Execlists
  drm/i915/bdw: Make sure gpu reset still works with Execlists
  drm/i915/bdw: Don't write PDP in the legacy way when using LRCs
  drm/i915: Track cursor changes as frontbuffer tracking flushes
  drm/i915/bdw: Help out the ctx switch interrupt handler
  drm/i915/bdw: Avoid non-lite-restore preemptions
  drm/i915/bdw: Handle context switch events
  drm/i915/bdw: Two-stage execlist submit process
  drm/i915/bdw: Write the tail pointer, LRC style
  drm/i915/bdw: Implement context switching (somewhat)
  drm/i915/bdw: Emission of requests with logical rings
  ...

Conflicts:
        drivers/gpu/drm/i915/i915_drv.c
commit a18b29f0c6
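The headline change above is the execlists groundwork: rather than hard-coding the legacy ringbuffer path, the driver now routes command submission through a small vtable (the gt.do_execbuf, gt.init_rings and friends added to struct drm_i915_private in the i915_drv.h hunk further down), filled in once depending on whether execlists are enabled. A minimal, self-contained C sketch of that dispatch pattern follows; the names are hypothetical, not the driver source:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the "gt" submission vtable in drm_i915_private. */
struct submit_ops {
        int (*do_execbuf)(const char *batch);
        int (*init_rings)(void);
};

static int legacy_execbuf(const char *batch)
{
        printf("legacy ringbuffer submission: %s\n", batch);
        return 0;
}

static int execlists_execbuf(const char *batch)
{
        printf("execlists submission: %s\n", batch);
        return 0;
}

static int legacy_init_rings(void) { return 0; }
static int execlists_init_rings(void) { return 0; }

int main(void)
{
        /* Mirrors i915.enable_execlists: still off by default in this merge. */
        bool enable_execlists = false;
        struct submit_ops gt;

        if (enable_execlists) {
                gt.do_execbuf = execlists_execbuf;
                gt.init_rings = execlists_init_rings;
        } else {
                gt.do_execbuf = legacy_execbuf;
                gt.init_rings = legacy_init_rings;
        }

        gt.init_rings();
        return gt.do_execbuf("batch buffer");
}

Selecting the entry points once at load time keeps the per-request code mostly branch-free; the remaining i915.enable_execlists checks visible in the __i915_add_request() hunks below cover the few places where the two paths still share a function.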
@@ -3919,6 +3919,11 @@ int num_ioctls;</synopsis>
 !Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
 !Idrivers/gpu/drm/i915/i915_cmd_parser.c
       </sect2>
+      <sect2>
+        <title>Logical Rings, Logical Ring Contexts and Execlists</title>
+!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
+!Idrivers/gpu/drm/i915/intel_lrc.c
+      </sect2>
     </sect1>
   </chapter>
 </part>
@@ -4175,12 +4175,25 @@ static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
         return ret;
 }
 
-static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
-                                       struct drm_property *property,
-                                       uint64_t value)
+/**
+ * drm_mode_plane_set_obj_prop - set the value of a property
+ * @plane: drm plane object to set property value for
+ * @property: property to set
+ * @value: value the property should be set to
+ *
+ * This functions sets a given property on a given plane object. This function
+ * calls the driver's ->set_property callback and changes the software state of
+ * the property if the callback succeeds.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+                                struct drm_property *property,
+                                uint64_t value)
 {
         int ret = -EINVAL;
-        struct drm_plane *plane = obj_to_plane(obj);
+        struct drm_mode_object *obj = &plane->base;
 
         if (plane->funcs->set_property)
                 ret = plane->funcs->set_property(plane, property, value);
@@ -4189,6 +4202,7 @@ static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
 
         return ret;
 }
+EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
 
 /**
  * drm_mode_getproperty_ioctl - get the current value of a object's property
@@ -4327,7 +4341,8 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
                 ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
                 break;
         case DRM_MODE_OBJECT_PLANE:
-                ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+                ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
+                                                  property, arg->value);
                 break;
         }
 
@@ -296,9 +296,9 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
                 drm_plane_force_disable(plane);
 
                 if (dev->mode_config.rotation_property) {
-                        drm_object_property_set_value(&plane->base,
-                                        dev->mode_config.rotation_property,
-                                        BIT(DRM_ROTATE_0));
+                        drm_mode_plane_set_obj_prop(plane,
+                                        dev->mode_config.rotation_property,
+                                        BIT(DRM_ROTATE_0));
                 }
         }
 
@@ -31,6 +31,7 @@ i915-y += i915_cmd_parser.o \
           i915_gpu_error.o \
           i915_irq.o \
           i915_trace_points.o \
+          intel_lrc.o \
           intel_ringbuffer.o \
           intel_uncore.o
 
@@ -842,8 +842,6 @@ finish:
  */
 bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
 {
-        struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
         if (!ring->needs_cmd_parser)
                 return false;
 
@@ -852,7 +850,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
          * disabled. That will cause all of the parser's PPGTT checks to
          * fail. For now, disable parsing when PPGTT is off.
          */
-        if (!dev_priv->mm.aliasing_ppgtt)
+        if (USES_PPGTT(ring->dev))
                 return false;
 
         return (i915.enable_cmd_parser == 1);
@@ -333,7 +333,7 @@ static int per_file_stats(int id, void *ptr, void *data)
                         }
 
                         ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
-                        if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
+                        if (ppgtt->file_priv != stats->file_priv)
                                 continue;
 
                         if (obj->ring) /* XXX per-vma statistic */
@@ -703,6 +703,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                 }
 
                 for_each_pipe(pipe) {
+                        if (!intel_display_power_enabled(dev_priv,
+                                                POWER_DOMAIN_PIPE(pipe))) {
+                                seq_printf(m, "Pipe %c power disabled\n",
+                                           pipe_name(pipe));
+                                continue;
+                        }
                         seq_printf(m, "Pipe %c IMR:\t%08x\n",
                                    pipe_name(pipe),
                                    I915_READ(GEN8_DE_PIPE_IMR(pipe)));
@@ -1671,6 +1677,14 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
         return 0;
 }
 
+static void describe_ctx_ringbuf(struct seq_file *m,
+                                 struct intel_ringbuffer *ringbuf)
+{
+        seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
+                   ringbuf->space, ringbuf->head, ringbuf->tail,
+                   ringbuf->last_retired_head);
+}
+
 static int i915_context_status(struct seq_file *m, void *unused)
 {
         struct drm_info_node *node = m->private;
@@ -1697,16 +1711,168 @@ static int i915_context_status(struct seq_file *m, void *unused)
         }
 
         list_for_each_entry(ctx, &dev_priv->context_list, link) {
-                if (ctx->legacy_hw_ctx.rcs_state == NULL)
+                if (!i915.enable_execlists &&
+                    ctx->legacy_hw_ctx.rcs_state == NULL)
                         continue;
 
                 seq_puts(m, "HW context ");
                 describe_ctx(m, ctx);
-                for_each_ring(ring, dev_priv, i)
+                for_each_ring(ring, dev_priv, i) {
                         if (ring->default_context == ctx)
-                                seq_printf(m, "(default context %s) ", ring->name);
+                                seq_printf(m, "(default context %s) ",
+                                           ring->name);
+                }
 
+                if (i915.enable_execlists) {
+                        seq_putc(m, '\n');
+                        for_each_ring(ring, dev_priv, i) {
+                                struct drm_i915_gem_object *ctx_obj =
+                                        ctx->engine[i].state;
+                                struct intel_ringbuffer *ringbuf =
+                                        ctx->engine[i].ringbuf;
+
+                                seq_printf(m, "%s: ", ring->name);
+                                if (ctx_obj)
+                                        describe_obj(m, ctx_obj);
+                                if (ringbuf)
+                                        describe_ctx_ringbuf(m, ringbuf);
+                                seq_putc(m, '\n');
+                        }
+                } else {
+                        describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
+                }
+
+                seq_putc(m, '\n');
+        }
+
+        mutex_unlock(&dev->struct_mutex);
+
+        return 0;
+}
+
+static int i915_dump_lrc(struct seq_file *m, void *unused)
+{
+        struct drm_info_node *node = (struct drm_info_node *) m->private;
+        struct drm_device *dev = node->minor->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_engine_cs *ring;
+        struct intel_context *ctx;
+        int ret, i;
+
+        if (!i915.enable_execlists) {
+                seq_printf(m, "Logical Ring Contexts are disabled\n");
+                return 0;
+        }
+
+        ret = mutex_lock_interruptible(&dev->struct_mutex);
+        if (ret)
+                return ret;
+
+        list_for_each_entry(ctx, &dev_priv->context_list, link) {
+                for_each_ring(ring, dev_priv, i) {
+                        struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
+
+                        if (ring->default_context == ctx)
+                                continue;
+
+                        if (ctx_obj) {
+                                struct page *page = i915_gem_object_get_page(ctx_obj, 1);
+                                uint32_t *reg_state = kmap_atomic(page);
+                                int j;
+
+                                seq_printf(m, "CONTEXT: %s %u\n", ring->name,
+                                           intel_execlists_ctx_id(ctx_obj));
+
+                                for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
+                                        seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                                                   i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
+                                                   reg_state[j], reg_state[j + 1],
+                                                   reg_state[j + 2], reg_state[j + 3]);
+                                }
+                                kunmap_atomic(reg_state);
+
+                                seq_putc(m, '\n');
+                        }
+                }
+        }
+
+        mutex_unlock(&dev->struct_mutex);
+
+        return 0;
+}
+
+static int i915_execlists(struct seq_file *m, void *data)
+{
+        struct drm_info_node *node = (struct drm_info_node *)m->private;
+        struct drm_device *dev = node->minor->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_engine_cs *ring;
+        u32 status_pointer;
+        u8 read_pointer;
+        u8 write_pointer;
+        u32 status;
+        u32 ctx_id;
+        struct list_head *cursor;
+        int ring_id, i;
+        int ret;
+
+        if (!i915.enable_execlists) {
+                seq_puts(m, "Logical Ring Contexts are disabled\n");
+                return 0;
+        }
+
+        ret = mutex_lock_interruptible(&dev->struct_mutex);
+        if (ret)
+                return ret;
+
+        for_each_ring(ring, dev_priv, ring_id) {
+                struct intel_ctx_submit_request *head_req = NULL;
+                int count = 0;
+                unsigned long flags;
+
+                seq_printf(m, "%s\n", ring->name);
+
+                status = I915_READ(RING_EXECLIST_STATUS(ring));
+                ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
+                seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
+                           status, ctx_id);
+
+                status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+                seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
+
+                read_pointer = ring->next_context_status_buffer;
+                write_pointer = status_pointer & 0x07;
+                if (read_pointer > write_pointer)
+                        write_pointer += 6;
+                seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
+                           read_pointer, write_pointer);
+
+                for (i = 0; i < 6; i++) {
+                        status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
+                        ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
+
+                        seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
+                                   i, status, ctx_id);
+                }
+
+                spin_lock_irqsave(&ring->execlist_lock, flags);
+                list_for_each(cursor, &ring->execlist_queue)
+                        count++;
+                head_req = list_first_entry_or_null(&ring->execlist_queue,
+                                struct intel_ctx_submit_request, execlist_link);
+                spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+                seq_printf(m, "\t%d requests in queue\n", count);
+                if (head_req) {
+                        struct drm_i915_gem_object *ctx_obj;
+
+                        ctx_obj = head_req->ctx->engine[ring_id].state;
+                        seq_printf(m, "\tHead request id: %u\n",
+                                   intel_execlists_ctx_id(ctx_obj));
+                        seq_printf(m, "\tHead request tail: %u\n",
+                                   head_req->tail);
+                }
+
-                describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
                 seq_putc(m, '\n');
         }
 
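One easy-to-misread detail in the i915_execlists dump above: the gen8 context-status buffer is a six-entry ring, so when the hardware write pointer has wrapped behind the software read pointer, the code unwraps it by adding 6 before printing. A tiny self-contained C illustration of that arithmetic (the entry-count macro name is made up for the example):

#include <stdio.h>

/* Six context-status entries per engine on gen8; hypothetical macro name. */
#define CSB_ENTRIES 6

int main(void)
{
        unsigned int read_pointer = 5;  /* ring->next_context_status_buffer */
        unsigned int write_pointer = 2; /* status_pointer & 0x07 */

        /* The write pointer wrapped past the end of the ring; unwrap it
         * so the distance to the read pointer stays non-negative. */
        if (read_pointer > write_pointer)
                write_pointer += CSB_ENTRIES;

        printf("%u status entries pending\n", write_pointer - read_pointer);
        return 0;       /* prints "3 status entries pending" */
}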
@@ -1815,7 +1981,13 @@ static int per_file_ctx(int id, void *ptr, void *data)
 {
         struct intel_context *ctx = ptr;
         struct seq_file *m = data;
-        struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
+        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+
+        if (!ppgtt) {
+                seq_printf(m, "  no ppgtt for context %d\n",
+                           ctx->user_handle);
+                return 0;
+        }
 
         if (i915_gem_context_is_default(ctx))
                 seq_puts(m, "  default context:\n");
@@ -1875,8 +2047,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
                 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
 
                 ppgtt->debug_dump(ppgtt, m);
-        } else
-                return;
+        }
 
         list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                 struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -3963,6 +4134,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
         {"i915_opregion", i915_opregion, 0},
         {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
         {"i915_context_status", i915_context_status, 0},
+        {"i915_dump_lrc", i915_dump_lrc, 0},
+        {"i915_execlists", i915_execlists, 0},
         {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
         {"i915_swizzle_info", i915_swizzle_info, 0},
         {"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -999,7 +999,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
                 value = HAS_WT(dev);
                 break;
         case I915_PARAM_HAS_ALIASING_PPGTT:
-                value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
+                value = USES_PPGTT(dev);
                 break;
         case I915_PARAM_HAS_WAIT_TIMEOUT:
                 value = 1;
@@ -1350,8 +1350,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
         if (ret)
                 goto cleanup_irq;
 
-        INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
-
         intel_modeset_gem_init(dev);
 
         /* Always safe in the mode setting case. */
@@ -1388,7 +1386,6 @@ cleanup_gem:
         i915_gem_cleanup_ringbuffer(dev);
         i915_gem_context_fini(dev);
         mutex_unlock(&dev->struct_mutex);
-        WARN_ON(dev_priv->mm.aliasing_ppgtt);
 cleanup_irq:
         drm_irq_uninstall(dev);
 cleanup_gem_stolen:
@@ -1603,9 +1600,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         dev->dev_private = dev_priv;
         dev_priv->dev = dev;
 
-        /* copy initial configuration to dev_priv->info */
+        /* Setup the write-once "constant" device info */
         device_info = (struct intel_device_info *)&dev_priv->info;
-        *device_info = *info;
+        memcpy(device_info, info, sizeof(dev_priv->info));
+        device_info->device_id = dev->pdev->device;
 
         spin_lock_init(&dev_priv->irq_lock);
         spin_lock_init(&dev_priv->gpu_error.lock);
@@ -1817,7 +1815,7 @@ out_mtrrfree:
         arch_phys_wc_del(dev_priv->gtt.mtrr);
         io_mapping_free(dev_priv->gtt.mappable);
 out_gtt:
-        dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
+        i915_global_gtt_cleanup(dev);
 out_regs:
         intel_uncore_fini(dev);
         pci_iounmap(dev->pdev, dev_priv->regs);
@@ -1864,7 +1862,6 @@ int i915_driver_unload(struct drm_device *dev)
         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                 intel_fbdev_fini(dev);
                 intel_modeset_cleanup(dev);
-                cancel_work_sync(&dev_priv->console_resume_work);
 
                 /*
                  * free the memory space allocated for the child device
@@ -1897,7 +1894,6 @@ int i915_driver_unload(struct drm_device *dev)
         mutex_lock(&dev->struct_mutex);
         i915_gem_cleanup_ringbuffer(dev);
         i915_gem_context_fini(dev);
-        WARN_ON(dev_priv->mm.aliasing_ppgtt);
         mutex_unlock(&dev->struct_mutex);
         i915_gem_cleanup_stolen(dev);
 
@@ -1905,8 +1901,6 @@ int i915_driver_unload(struct drm_device *dev)
                 i915_free_hws(dev);
         }
 
-        WARN_ON(!list_empty(&dev_priv->vm_list));
-
         drm_vblank_cleanup(dev);
 
         intel_teardown_gmbus(dev);
@@ -1916,7 +1910,7 @@ int i915_driver_unload(struct drm_device *dev)
         destroy_workqueue(dev_priv->wq);
         pm_qos_remove_request(&dev_priv->pm_qos);
 
-        dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
+        i915_global_gtt_cleanup(dev);
 
         intel_uncore_fini(dev);
         if (dev_priv->regs != NULL)
@@ -481,6 +481,10 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
         if (i915.semaphores >= 0)
                 return i915.semaphores;
 
+        /* TODO: make semaphores and Execlists play nicely together */
+        if (i915.enable_execlists)
+                return false;
+
         /* Until we get further testing... */
         if (IS_GEN8(dev))
                 return false;
@@ -524,6 +528,10 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
         drm_modeset_unlock_all(dev);
 }
 
+static int intel_suspend_complete(struct drm_i915_private *dev_priv);
+static int intel_resume_prepare(struct drm_i915_private *dev_priv,
+                                bool rpm_resume);
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -591,9 +599,7 @@ static int i915_drm_freeze(struct drm_device *dev)
         intel_uncore_forcewake_reset(dev, false);
         intel_opregion_fini(dev);
 
-        console_lock();
-        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
-        console_unlock();
+        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
 
         dev_priv->suspend_count++;
 
@@ -632,30 +638,20 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
         return 0;
 }
 
-void intel_console_resume(struct work_struct *work)
-{
-        struct drm_i915_private *dev_priv =
-                container_of(work, struct drm_i915_private,
-                             console_resume_work);
-        struct drm_device *dev = dev_priv->dev;
-
-        console_lock();
-        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
-        console_unlock();
-}
-
 static int i915_drm_thaw_early(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
+        int ret;
 
-        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-                hsw_disable_pc8(dev_priv);
+        ret = intel_resume_prepare(dev_priv, false);
+        if (ret)
+                DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
 
         intel_uncore_early_sanitize(dev, true);
         intel_uncore_sanitize(dev);
         intel_power_domains_init_hw(dev_priv);
 
-        return 0;
+        return ret;
 }
 
 static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
@@ -714,17 +710,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 
         intel_opregion_init(dev);
 
-        /*
-         * The console lock can be pretty contented on resume due
-         * to all the printk activity. Try to keep it out of the hot
-         * path of resume if possible.
-         */
-        if (console_trylock()) {
-                intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
-                console_unlock();
-        } else {
-                schedule_work(&dev_priv->console_resume_work);
-        }
+        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
 
         mutex_lock(&dev_priv->modeset_restore_lock);
         dev_priv->modeset_restore = MODESET_DONE;
@@ -941,6 +927,7 @@ static int i915_pm_suspend_late(struct device *dev)
         struct pci_dev *pdev = to_pci_dev(dev);
         struct drm_device *drm_dev = pci_get_drvdata(pdev);
         struct drm_i915_private *dev_priv = drm_dev->dev_private;
+        int ret;
 
         /*
          * We have a suspedn ordering issue with the snd-hda driver also
@@ -954,13 +941,16 @@ static int i915_pm_suspend_late(struct device *dev)
         if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                 return 0;
 
-        if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
-                hsw_enable_pc8(dev_priv);
+        ret = intel_suspend_complete(dev_priv);
 
-        pci_disable_device(pdev);
-        pci_set_power_state(pdev, PCI_D3hot);
+        if (ret)
+                DRM_ERROR("Suspend complete failed: %d\n", ret);
+        else {
+                pci_disable_device(pdev);
+                pci_set_power_state(pdev, PCI_D3hot);
+        }
 
-        return 0;
+        return ret;
 }
 
 static int i915_pm_resume_early(struct device *dev)
@@ -1016,23 +1006,26 @@ static int i915_pm_poweroff(struct device *dev)
         return i915_drm_freeze(drm_dev);
 }
 
-static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
+static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
 {
         hsw_enable_pc8(dev_priv);
 
         return 0;
 }
 
-static int snb_runtime_resume(struct drm_i915_private *dev_priv)
+static int snb_resume_prepare(struct drm_i915_private *dev_priv,
+                              bool rpm_resume)
 {
         struct drm_device *dev = dev_priv->dev;
 
-        intel_init_pch_refclk(dev);
+        if (rpm_resume)
+                intel_init_pch_refclk(dev);
 
         return 0;
 }
 
-static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
+static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
+                              bool rpm_resume)
 {
         hsw_disable_pc8(dev_priv);
 
@@ -1328,7 +1321,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
         I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
 }
 
-static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
+static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
 {
         u32 mask;
         int err;
@@ -1368,7 +1361,8 @@ err1:
         return err;
 }
 
-static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
+static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+                              bool rpm_resume)
 {
         struct drm_device *dev = dev_priv->dev;
         int err;
@@ -1393,8 +1387,10 @@ static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
 
         vlv_check_no_gt_access(dev_priv);
 
-        intel_init_clock_gating(dev);
-        i915_gem_restore_fences(dev);
+        if (rpm_resume) {
+                intel_init_clock_gating(dev);
+                i915_gem_restore_fences(dev);
+        }
 
         return ret;
 }
@@ -1409,7 +1405,9 @@ static int intel_runtime_suspend(struct device *device)
         if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
                 return -ENODEV;
 
-        WARN_ON(!HAS_RUNTIME_PM(dev));
+        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+                return -ENODEV;
+
         assert_force_wake_inactive(dev_priv);
 
         DRM_DEBUG_KMS("Suspending device\n");
@@ -1446,17 +1444,7 @@ static int intel_runtime_suspend(struct device *device)
         cancel_work_sync(&dev_priv->rps.work);
         intel_runtime_pm_disable_interrupts(dev);
 
-        if (IS_GEN6(dev)) {
-                ret = 0;
-        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
-                ret = hsw_runtime_suspend(dev_priv);
-        } else if (IS_VALLEYVIEW(dev)) {
-                ret = vlv_runtime_suspend(dev_priv);
-        } else {
-                ret = -ENODEV;
-                WARN_ON(1);
-        }
+        ret = intel_suspend_complete(dev_priv);
 
         if (ret) {
                 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
                 intel_runtime_pm_restore_interrupts(dev);
@@ -1487,24 +1475,15 @@ static int intel_runtime_resume(struct device *device)
         struct drm_i915_private *dev_priv = dev->dev_private;
         int ret;
 
-        WARN_ON(!HAS_RUNTIME_PM(dev));
+        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+                return -ENODEV;
 
         DRM_DEBUG_KMS("Resuming device\n");
 
         intel_opregion_notify_adapter(dev, PCI_D0);
         dev_priv->pm.suspended = false;
 
-        if (IS_GEN6(dev)) {
-                ret = snb_runtime_resume(dev_priv);
-        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
-                ret = hsw_runtime_resume(dev_priv);
-        } else if (IS_VALLEYVIEW(dev)) {
-                ret = vlv_runtime_resume(dev_priv);
-        } else {
-                WARN_ON(1);
-                ret = -ENODEV;
-        }
+        ret = intel_resume_prepare(dev_priv, true);
 
         /*
          * No point of rolling back things in case of an error, as the best
          * we can do is to hope that things will still work (and disable RPM).
@@ -1523,6 +1502,48 @@ static int intel_runtime_resume(struct device *device)
         return ret;
 }
 
+/*
+ * This function implements common functionality of runtime and system
+ * suspend sequence.
+ */
+static int intel_suspend_complete(struct drm_i915_private *dev_priv)
+{
+        struct drm_device *dev = dev_priv->dev;
+        int ret;
+
+        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+                ret = hsw_suspend_complete(dev_priv);
+        else if (IS_VALLEYVIEW(dev))
+                ret = vlv_suspend_complete(dev_priv);
+        else
+                ret = 0;
+
+        return ret;
+}
+
+/*
+ * This function implements common functionality of runtime and system
+ * resume sequence. Variable rpm_resume used for implementing different
+ * code paths.
+ */
+static int intel_resume_prepare(struct drm_i915_private *dev_priv,
+                                bool rpm_resume)
+{
+        struct drm_device *dev = dev_priv->dev;
+        int ret;
+
+        if (IS_GEN6(dev))
+                ret = snb_resume_prepare(dev_priv, rpm_resume);
+        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+                ret = hsw_resume_prepare(dev_priv, rpm_resume);
+        else if (IS_VALLEYVIEW(dev))
+                ret = vlv_resume_prepare(dev_priv, rpm_resume);
+        else
+                ret = 0;
+
+        return ret;
+}
+
 static const struct dev_pm_ops i915_pm_ops = {
         .suspend = i915_pm_suspend,
         .suspend_late = i915_pm_suspend_late,
@@ -35,6 +35,7 @@
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
+#include "intel_lrc.h"
 #include "i915_gem_gtt.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
@@ -53,7 +54,7 @@
 
 #define DRIVER_NAME             "i915"
 #define DRIVER_DESC             "Intel Graphics"
-#define DRIVER_DATE             "20140808"
+#define DRIVER_DATE             "20140822"
 
 enum pipe {
         INVALID_PIPE = -1,
@@ -395,6 +396,7 @@ struct drm_i915_error_state {
                 pid_t pid;
                 char comm[TASK_COMM_LEN];
         } ring[I915_NUM_RINGS];
+
         struct drm_i915_error_buffer {
                 u32 size;
                 u32 name;
@@ -413,6 +415,7 @@ struct drm_i915_error_state {
         } **active_bo, **pinned_bo;
 
         u32 *active_bo_count, *pinned_bo_count;
+        u32 vm_count;
 };
 
 struct intel_connector;
@@ -558,6 +561,7 @@ struct intel_uncore {
 
 struct intel_device_info {
         u32 display_mmio_offset;
+        u16 device_id;
         u8 num_pipes:3;
         u8 num_sprites[I915_MAX_PIPES];
         u8 gen;
@@ -622,13 +626,20 @@ struct intel_context {
         uint8_t remap_slice;
         struct drm_i915_file_private *file_priv;
         struct i915_ctx_hang_stats hang_stats;
-        struct i915_address_space *vm;
+        struct i915_hw_ppgtt *ppgtt;
 
+        /* Legacy ring buffer submission */
         struct {
                 struct drm_i915_gem_object *rcs_state;
                 bool initialized;
         } legacy_hw_ctx;
 
+        /* Execlists */
+        struct {
+                struct drm_i915_gem_object *state;
+                struct intel_ringbuffer *ringbuf;
+        } engine[I915_NUM_RINGS];
+
         struct list_head link;
 };
 
@@ -1577,14 +1588,9 @@ struct drm_i915_private {
 #ifdef CONFIG_DRM_I915_FBDEV
         /* list of fbdev register on this device */
         struct intel_fbdev *fbdev;
+        struct work_struct fbdev_suspend_work;
 #endif
 
-        /*
-         * The console may be contended at resume, but we don't
-         * want it to block on it.
-         */
-        struct work_struct console_resume_work;
-
         struct drm_property *broadcast_rgb_property;
         struct drm_property *force_audio_property;
 
@@ -1636,6 +1642,20 @@ struct drm_i915_private {
         /* Old ums support infrastructure, same warning applies. */
         struct i915_ums_state ums;
 
+        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
+        struct {
+                int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
+                                  struct intel_engine_cs *ring,
+                                  struct intel_context *ctx,
+                                  struct drm_i915_gem_execbuffer2 *args,
+                                  struct list_head *vmas,
+                                  struct drm_i915_gem_object *batch_obj,
+                                  u64 exec_start, u32 flags);
+                int (*init_rings)(struct drm_device *dev);
+                void (*cleanup_ring)(struct intel_engine_cs *ring);
+                void (*stop_ring)(struct intel_engine_cs *ring);
+        } gt;
+
         /*
          * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
          * will be rejected. Instead look for a better place.
@@ -1777,13 +1797,6 @@ struct drm_i915_gem_object {
          * Only honoured if hardware has relevant pte bit
          */
         unsigned long gt_ro:1;
 
-        /*
-         * Is the GPU currently using a fence to access this buffer,
-         */
-        unsigned int pending_fenced_gpu_access:1;
-        unsigned int fenced_gpu_access:1;
-
         unsigned int cache_level:3;
 
         unsigned int has_aliasing_ppgtt_mapping:1;
@@ -1987,51 +2000,63 @@ struct drm_i915_cmd_table {
         int count;
 };
 
-#define INTEL_INFO(dev) (&to_i915(dev)->info)
+/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
+#define __I915__(p) ({ \
+        struct drm_i915_private *__p; \
+        if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
+                __p = (struct drm_i915_private *)p; \
+        else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
+                __p = to_i915((struct drm_device *)p); \
+        else \
+                BUILD_BUG(); \
+        __p; \
+})
+#define INTEL_INFO(p)   (&__I915__(p)->info)
+#define INTEL_DEVID(p)  (INTEL_INFO(p)->device_id)
 
-#define IS_I830(dev)            ((dev)->pdev->device == 0x3577)
-#define IS_845G(dev)            ((dev)->pdev->device == 0x2562)
+#define IS_I830(dev)            (INTEL_DEVID(dev) == 0x3577)
+#define IS_845G(dev)            (INTEL_DEVID(dev) == 0x2562)
 #define IS_I85X(dev)            (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev)           ((dev)->pdev->device == 0x2572)
+#define IS_I865G(dev)           (INTEL_DEVID(dev) == 0x2572)
 #define IS_I915G(dev)           (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev)          ((dev)->pdev->device == 0x2592)
-#define IS_I945G(dev)           ((dev)->pdev->device == 0x2772)
+#define IS_I915GM(dev)          (INTEL_DEVID(dev) == 0x2592)
+#define IS_I945G(dev)           (INTEL_DEVID(dev) == 0x2772)
 #define IS_I945GM(dev)          (INTEL_INFO(dev)->is_i945gm)
 #define IS_BROADWATER(dev)      (INTEL_INFO(dev)->is_broadwater)
 #define IS_CRESTLINE(dev)       (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev)            ((dev)->pdev->device == 0x2A42)
+#define IS_GM45(dev)            (INTEL_DEVID(dev) == 0x2A42)
 #define IS_G4X(dev)             (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev)      ((dev)->pdev->device == 0xa001)
-#define IS_PINEVIEW_M(dev)      ((dev)->pdev->device == 0xa011)
+#define IS_PINEVIEW_G(dev)      (INTEL_DEVID(dev) == 0xa001)
+#define IS_PINEVIEW_M(dev)      (INTEL_DEVID(dev) == 0xa011)
 #define IS_PINEVIEW(dev)        (INTEL_INFO(dev)->is_pineview)
 #define IS_G33(dev)             (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev)      ((dev)->pdev->device == 0x0046)
+#define IS_IRONLAKE_M(dev)      (INTEL_DEVID(dev) == 0x0046)
 #define IS_IVYBRIDGE(dev)       (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev)         ((dev)->pdev->device == 0x0156 || \
-                                 (dev)->pdev->device == 0x0152 || \
-                                 (dev)->pdev->device == 0x015a)
-#define IS_SNB_GT1(dev)         ((dev)->pdev->device == 0x0102 || \
-                                 (dev)->pdev->device == 0x0106 || \
-                                 (dev)->pdev->device == 0x010A)
+#define IS_IVB_GT1(dev)         (INTEL_DEVID(dev) == 0x0156 || \
                                 INTEL_DEVID(dev) == 0x0152 || \
+                                 INTEL_DEVID(dev) == 0x015a)
+#define IS_SNB_GT1(dev)         (INTEL_DEVID(dev) == 0x0102 || \
+                                 INTEL_DEVID(dev) == 0x0106 || \
+                                 INTEL_DEVID(dev) == 0x010A)
 #define IS_VALLEYVIEW(dev)      (INTEL_INFO(dev)->is_valleyview)
 #define IS_CHERRYVIEW(dev)      (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_HASWELL(dev)         (INTEL_INFO(dev)->is_haswell)
 #define IS_BROADWELL(dev)       (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_MOBILE(dev)          (INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)   (IS_HASWELL(dev) && \
-                                 ((dev)->pdev->device & 0xFF00) == 0x0C00)
+                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)         (IS_BROADWELL(dev) && \
-                                 (((dev)->pdev->device & 0xf) == 0x2 || \
-                                 ((dev)->pdev->device & 0xf) == 0x6 || \
-                                 ((dev)->pdev->device & 0xf) == 0xe))
+                                 ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
+                                 (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+                                 (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_HSW_ULT(dev)         (IS_HASWELL(dev) && \
-                                 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
 #define IS_ULT(dev)             (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)         (IS_HASWELL(dev) && \
-                                 ((dev)->pdev->device & 0x00F0) == 0x0020)
+                                 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 /* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev)         ((dev)->pdev->device == 0x0A0E || \
-                                 (dev)->pdev->device == 0x0A1E)
+#define IS_HSW_ULX(dev)         (INTEL_DEVID(dev) == 0x0A0E || \
+                                 INTEL_DEVID(dev) == 0x0A1E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 /*
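The new __I915__() macro above is a GCC statement expression: __builtin_types_compatible_p() picks a conversion based on the static type of the argument, so INTEL_INFO() and INTEL_DEVID() now accept either a struct drm_device * or a struct drm_i915_private *. A standalone C illustration of the same trick, with made-up demo types rather than the driver's (compiles with gcc):

#include <stdio.h>

struct demo_device { void *dev_private; };
struct demo_private { int device_id; };

static struct demo_private *to_priv(struct demo_device *dev)
{
        return (struct demo_private *)dev->dev_private;
}

/* Accepts struct demo_device * or struct demo_private *, like __I915__(). */
#define DEMO_PRIV(p) ({ \
        struct demo_private *__p; \
        if (__builtin_types_compatible_p(typeof(*(p)), struct demo_private)) \
                __p = (struct demo_private *)(p); \
        else \
                __p = to_priv((struct demo_device *)(p)); \
        __p; \
})

int main(void)
{
        struct demo_private priv = { .device_id = 0x2a42 };
        struct demo_device dev = { .dev_private = &priv };

        /* Both spellings resolve to the same private pointer. */
        printf("0x%x 0x%x\n", DEMO_PRIV(&dev)->device_id,
               DEMO_PRIV(&priv)->device_id);
        return 0;
}

The kernel version additionally BUILD_BUG()s on any other pointer type, which works because the dead branch is eliminated at compile time.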
@@ -2063,6 +2088,7 @@ struct drm_i915_cmd_table {
 #define I915_NEED_GFX_HWS(dev)  (INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)    (INTEL_INFO(dev)->gen >= 6)
+#define HAS_LOGICAL_RING_CONTEXTS(dev)  (INTEL_INFO(dev)->gen >= 8)
 #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
 #define HAS_PPGTT(dev)          (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
 #define USES_PPGTT(dev)         (i915.enable_ppgtt)
@@ -2150,6 +2176,7 @@ struct i915_params {
         int enable_rc6;
         int enable_fbc;
         int enable_ppgtt;
+        int enable_execlists;
         int enable_psr;
         unsigned int preliminary_hw_support;
         int disable_power_well;
@@ -2196,8 +2223,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 
-extern void intel_console_resume(struct work_struct *work);
-
 /* i915_irq.c */
 void i915_queue_hangcheck(struct drm_device *dev);
 __printf(3, 4)
@@ -2245,6 +2270,20 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
+void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
+                                        struct intel_engine_cs *ring);
+void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+                                         struct drm_file *file,
+                                         struct intel_engine_cs *ring,
+                                         struct drm_i915_gem_object *obj);
+int i915_gem_ringbuffer_submission(struct drm_device *dev,
+                                   struct drm_file *file,
+                                   struct intel_engine_cs *ring,
+                                   struct intel_context *ctx,
+                                   struct drm_i915_gem_execbuffer2 *args,
+                                   struct list_head *vmas,
+                                   struct drm_i915_gem_object *batch_obj,
+                                   u64 exec_start, u32 flags);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2397,6 +2436,7 @@ void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
+int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
@@ -2467,7 +2507,7 @@ static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
 }
 
 /* Some GGTT VM helpers */
-#define obj_to_ggtt(obj) \
+#define i915_obj_to_ggtt(obj) \
         (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
 static inline bool i915_is_ggtt(struct i915_address_space *vm)
 {
@@ -2476,21 +2516,30 @@ static inline bool i915_is_ggtt(struct i915_address_space *vm)
         return vm == ggtt;
 }
 
+static inline struct i915_hw_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+        WARN_ON(i915_is_ggtt(vm));
+
+        return container_of(vm, struct i915_hw_ppgtt, base);
+}
+
 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 {
-        return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
+        return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
 }
 
 static inline unsigned long
 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
 {
-        return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
+        return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
 }
 
 static inline unsigned long
 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 {
-        return i915_gem_obj_size(obj, obj_to_ggtt(obj));
+        return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
 }
 
 static inline int __must_check
@@ -2498,7 +2547,8 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
                       uint32_t alignment,
                       unsigned flags)
 {
-        return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
+        return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
+                                   alignment, flags | PIN_GLOBAL);
 }
 
 static inline int
@@ -2510,7 +2560,6 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
 void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 
 /* i915_gem_context.c */
-#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
@@ -2522,6 +2571,8 @@ int i915_switch_context(struct intel_engine_cs *ring,
 struct intel_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
+struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
 static inline void i915_gem_context_reference(struct intel_context *ctx)
 {
         kref_get(&ctx->ref);
@@ -2160,8 +2160,6 @@ static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                                struct intel_engine_cs *ring)
 {
-        struct drm_device *dev = obj->base.dev;
-        struct drm_i915_private *dev_priv = dev->dev_private;
         u32 seqno = intel_ring_get_seqno(ring);
 
         BUG_ON(ring == NULL);
@@ -2180,19 +2178,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
         list_move_tail(&obj->ring_list, &ring->active_list);
 
         obj->last_read_seqno = seqno;
-
-        if (obj->fenced_gpu_access) {
-                obj->last_fenced_seqno = seqno;
-
-                /* Bump MRU to take account of the delayed flush */
-                if (obj->fence_reg != I915_FENCE_REG_NONE) {
-                        struct drm_i915_fence_reg *reg;
-
-                        reg = &dev_priv->fence_regs[obj->fence_reg];
-                        list_move_tail(&reg->lru_list,
-                                       &dev_priv->mm.fence_list);
-                }
-        }
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2228,7 +2213,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
         obj->base.write_domain = 0;
 
         obj->last_fenced_seqno = 0;
-        obj->fenced_gpu_access = false;
 
         obj->active = 0;
         drm_gem_object_unreference(&obj->base);
@@ -2326,10 +2310,21 @@ int __i915_add_request(struct intel_engine_cs *ring,
 {
         struct drm_i915_private *dev_priv = ring->dev->dev_private;
         struct drm_i915_gem_request *request;
+        struct intel_ringbuffer *ringbuf;
         u32 request_ring_position, request_start;
         int ret;
 
-        request_start = intel_ring_get_tail(ring->buffer);
+        request = ring->preallocated_lazy_request;
+        if (WARN_ON(request == NULL))
+                return -ENOMEM;
+
+        if (i915.enable_execlists) {
+                struct intel_context *ctx = request->ctx;
+                ringbuf = ctx->engine[ring->id].ringbuf;
+        } else
+                ringbuf = ring->buffer;
+
+        request_start = intel_ring_get_tail(ringbuf);
         /*
          * Emit any outstanding flushes - execbuf can fail to emit the flush
          * after having emitted the batchbuffer command. Hence we need to fix
@ -2337,24 +2332,32 @@ int __i915_add_request(struct intel_engine_cs *ring,
|
|||||||
* is that the flush _must_ happen before the next request, no matter
|
* is that the flush _must_ happen before the next request, no matter
|
||||||
* what.
|
* what.
|
||||||
*/
|
*/
|
||||||
ret = intel_ring_flush_all_caches(ring);
|
if (i915.enable_execlists) {
|
||||||
if (ret)
|
ret = logical_ring_flush_all_caches(ringbuf);
|
||||||
return ret;
|
if (ret)
|
||||||
|
return ret;
|
||||||
request = ring->preallocated_lazy_request;
|
} else {
|
||||||
if (WARN_ON(request == NULL))
|
ret = intel_ring_flush_all_caches(ring);
|
||||||
return -ENOMEM;
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
/* Record the position of the start of the request so that
|
/* Record the position of the start of the request so that
|
||||||
* should we detect the updated seqno part-way through the
|
* should we detect the updated seqno part-way through the
|
||||||
* GPU processing the request, we never over-estimate the
|
* GPU processing the request, we never over-estimate the
|
||||||
* position of the head.
|
* position of the head.
|
||||||
*/
|
*/
|
||||||
request_ring_position = intel_ring_get_tail(ring->buffer);
|
request_ring_position = intel_ring_get_tail(ringbuf);
|
||||||
|
|
||||||
ret = ring->add_request(ring);
|
if (i915.enable_execlists) {
|
||||||
if (ret)
|
ret = ring->emit_request(ringbuf);
|
||||||
return ret;
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
} else {
|
||||||
|
ret = ring->add_request(ring);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
request->seqno = intel_ring_get_seqno(ring);
|
request->seqno = intel_ring_get_seqno(ring);
|
||||||
request->ring = ring;
|
request->ring = ring;
|
||||||
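
Note: the hunks above split __i915_add_request() into two submission paths keyed off i915.enable_execlists: the legacy path flushes and emits through the engine's single ring buffer, the execlists path through the ringbuffer embedded in the request's logical ring context. A minimal standalone sketch of that dispatch shape (illustrative names, not the driver's symbols):

#include <stdbool.h>
#include <stdio.h>

static bool enable_execlists;	/* stand-in for the i915.enable_execlists modparam */

static int ring_add_request(void)
{
	puts("legacy: request emitted into the engine ring buffer");
	return 0;
}

static int ring_emit_request(void)
{
	puts("execlists: request emitted into the context's ringbuffer");
	return 0;
}

static int add_request(void)
{
	/* Identical request bookkeeping either way; only the hardware
	 * submission mechanism differs. */
	if (enable_execlists)
		return ring_emit_request();
	return ring_add_request();
}

int main(void)
{
	enable_execlists = true;
	return add_request();
}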
@@ -2369,12 +2372,14 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 */
 	request->batch_obj = obj;
 
-	/* Hold a reference to the current context so that we can inspect
-	 * it later in case a hangcheck error event fires.
-	 */
-	request->ctx = ring->last_context;
-	if (request->ctx)
-		i915_gem_context_reference(request->ctx);
+	if (!i915.enable_execlists) {
+		/* Hold a reference to the current context so that we can inspect
+		 * it later in case a hangcheck error event fires.
+		 */
+		request->ctx = ring->last_context;
+		if (request->ctx)
+			i915_gem_context_reference(request->ctx);
+	}
 
 	request->emitted_jiffies = jiffies;
 	list_add_tail(&request->list, &ring->request_list);
@@ -2545,6 +2550,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 		i915_gem_free_request(request);
 	}
 
+	while (!list_empty(&ring->execlist_queue)) {
+		struct intel_ctx_submit_request *submit_req;
+
+		submit_req = list_first_entry(&ring->execlist_queue,
+				struct intel_ctx_submit_request,
+				execlist_link);
+		list_del(&submit_req->execlist_link);
+		intel_runtime_pm_put(dev_priv);
+		i915_gem_context_unreference(submit_req->ctx);
+		kfree(submit_req);
+	}
+
 	/* These may not have been flush before the reset, do so now */
 	kfree(ring->preallocated_lazy_request);
 	ring->preallocated_lazy_request = NULL;
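
Note: the reset path now also drains requests still queued for execlist submission, dropping the references each queued item holds. A self-contained userspace analogue of that list-drain pattern (hypothetical struct, plain pointers instead of the kernel's list_head):

#include <stdlib.h>

struct submit_req {
	struct submit_req *next;
	/* context reference, ringbuffer tail, ... */
};

/* Pop every queued request and release what it holds, mirroring the
 * list_first_entry()/list_del()/kfree() loop above. */
static void drain_queue(struct submit_req **queue)
{
	while (*queue) {
		struct submit_req *req = *queue;

		*queue = req->next;
		/* the driver also drops a runtime-pm and a context ref here */
		free(req);
	}
}

int main(void)
{
	struct submit_req *queue = NULL;

	for (int i = 0; i < 3; i++) {
		struct submit_req *req = calloc(1, sizeof(*req));

		if (!req)
			break;
		req->next = queue;
		queue = req;
	}
	drain_queue(&queue);
	return 0;
}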
@@ -2629,6 +2646,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
+		struct intel_ringbuffer *ringbuf;
 
 		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
@@ -2638,12 +2656,24 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 			break;
 
 		trace_i915_gem_request_retire(ring, request->seqno);
 
+		/* This is one of the few common intersection points
+		 * between legacy ringbuffer submission and execlists:
+		 * we need to tell them apart in order to find the correct
+		 * ringbuffer to which the request belongs to.
+		 */
+		if (i915.enable_execlists) {
+			struct intel_context *ctx = request->ctx;
+			ringbuf = ctx->engine[ring->id].ringbuf;
+		} else
+			ringbuf = ring->buffer;
+
 		/* We know the GPU must have read the request to have
 		 * sent us the seqno + interrupt, so use the position
 		 * of tail of the request to update the last known position
 		 * of the GPU head.
 		 */
-		ring->buffer->last_retired_head = request->tail;
+		ringbuf->last_retired_head = request->tail;
 
 		i915_gem_free_request(request);
 	}
@@ -2919,9 +2949,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	vma->unbind_vma(vma);
 
 	list_del_init(&vma->mm_list);
-	/* Avoid an unnecessary call to unbind on rebind. */
 	if (i915_is_ggtt(vma->vm))
-		obj->map_and_fenceable = true;
+		obj->map_and_fenceable = false;
 
 	drm_mm_remove_node(&vma->node);
 	i915_gem_vma_destroy(vma);
@@ -3166,7 +3195,6 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 		obj->last_fenced_seqno = 0;
 	}
 
-	obj->fenced_gpu_access = false;
 	return 0;
 }
 
@@ -3273,6 +3301,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 			return 0;
 		}
 	} else if (enable) {
+		if (WARN_ON(!obj->map_and_fenceable))
+			return -EINVAL;
+
 		reg = i915_find_fence_reg(dev);
 		if (IS_ERR(reg))
 			return PTR_ERR(reg);
@@ -3583,11 +3614,12 @@ int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
-	if (!i915_gem_obj_bound_any(obj))
+	if (vma == NULL)
 		return -EINVAL;
 
 	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3629,13 +3661,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 					    old_write_domain);
 
 	/* And bump the LRU for this access */
-	if (i915_gem_object_is_inactive(obj)) {
-		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
-		if (vma)
-			list_move_tail(&vma->mm_list,
-				       &dev_priv->gtt.base.inactive_list);
-
-	}
+	if (i915_gem_object_is_inactive(obj))
+		list_move_tail(&vma->mm_list,
+			       &dev_priv->gtt.base.inactive_list);
 
 	return 0;
 }
@@ -3799,9 +3827,6 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 
-	if (list_empty(&obj->vma_list))
-		return false;
-
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (!vma)
 		return false;
@@ -4328,8 +4353,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	obj->madv = I915_MADV_WILLNEED;
-	/* Avoid an unnecessary call to unbind on the first bind. */
-	obj->map_and_fenceable = true;
 
 	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
 }
@@ -4490,12 +4513,18 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
+	struct i915_address_space *vm = NULL;
 	WARN_ON(vma->node.allocated);
 
 	/* Keep the vma as a placeholder in the execbuffer reservation lists */
 	if (!list_empty(&vma->exec_list))
 		return;
 
+	vm = vma->vm;
+
+	if (!i915_is_ggtt(vm))
+		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
+
 	list_del(&vma->vma_link);
 
 	kfree(vma);
@@ -4509,7 +4538,7 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
 	int i;
 
 	for_each_ring(ring, dev_priv, i)
-		intel_stop_ring_buffer(ring);
+		dev_priv->gt.stop_ring(ring);
 }
 
 int
@@ -4626,7 +4655,7 @@ intel_enable_blt(struct drm_device *dev)
 	return true;
 }
 
-static int i915_gem_init_rings(struct drm_device *dev)
+int i915_gem_init_rings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -4709,7 +4738,7 @@ i915_gem_init_hw(struct drm_device *dev)
 
 	i915_gem_init_swizzling(dev);
 
-	ret = i915_gem_init_rings(dev);
+	ret = dev_priv->gt.init_rings(dev);
 	if (ret)
 		return ret;
 
@@ -4727,6 +4756,14 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (ret && ret != -EIO) {
 		DRM_ERROR("Context enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
+
+		return ret;
+	}
+
+	ret = i915_ppgtt_init_hw(dev);
+	if (ret && ret != -EIO) {
+		DRM_ERROR("PPGTT enable failed %d\n", ret);
+		i915_gem_cleanup_ringbuffer(dev);
 	}
 
 	return ret;
@@ -4737,6 +4774,9 @@ int i915_gem_init(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
+			i915.enable_execlists);
+
 	mutex_lock(&dev->struct_mutex);
 
 	if (IS_VALLEYVIEW(dev)) {
@@ -4747,7 +4787,24 @@ int i915_gem_init(struct drm_device *dev)
 			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
 	}
 
-	i915_gem_init_userptr(dev);
+	if (!i915.enable_execlists) {
+		dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
+		dev_priv->gt.init_rings = i915_gem_init_rings;
+		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
+		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+	} else {
+		dev_priv->gt.do_execbuf = intel_execlists_submission;
+		dev_priv->gt.init_rings = intel_logical_rings_init;
+		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
+		dev_priv->gt.stop_ring = intel_logical_ring_stop;
+	}
+
+	ret = i915_gem_init_userptr(dev);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
 	i915_gem_init_global_gtt(dev);
 
 	ret = i915_gem_context_init(dev);
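
Note: this hunk is the heart of the split: ring setup, teardown, stopping and execbuffer submission now all go through the dev_priv->gt function table, selected once at init instead of branching at every call site. A hedged sketch of the pattern, with invented names:

#include <stdbool.h>

struct gt_ops {
	int  (*init_rings)(void);
	void (*cleanup_ring)(void);
	void (*stop_ring)(void);
	int  (*do_execbuf)(void);
};

static int legacy_init(void)      { return 0; }
static void legacy_cleanup(void)  { }
static void legacy_stop(void)     { }
static int legacy_execbuf(void)   { return 0; }

static int lr_init(void)          { return 0; }
static void lr_cleanup(void)      { }
static void lr_stop(void)         { }
static int lr_execbuf(void)       { return 0; }

static const struct gt_ops legacy_ops = {
	legacy_init, legacy_cleanup, legacy_stop, legacy_execbuf,
};
static const struct gt_ops execlists_ops = {
	lr_init, lr_cleanup, lr_stop, lr_execbuf,
};

/* Pick the submission backend once; every caller just uses gt->... */
static const struct gt_ops *pick_backend(bool enable_execlists)
{
	return enable_execlists ? &execlists_ops : &legacy_ops;
}

int main(void)
{
	const struct gt_ops *gt = pick_backend(true);

	return gt->init_rings() || gt->do_execbuf();
}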
@@ -4782,7 +4839,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	int i;
 
 	for_each_ring(ring, dev_priv, i)
-		intel_cleanup_ring_buffer(ring);
+		dev_priv->gt.cleanup_ring(ring);
 }
 
 int
@@ -5094,9 +5151,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
 	struct i915_vma *vma;
 
-	if (!dev_priv->mm.aliasing_ppgtt ||
-	    vm == &dev_priv->mm.aliasing_ppgtt->base)
-		vm = &dev_priv->gtt.base;
+	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
 	list_for_each_entry(vma, &o->vma_list, vma_link) {
 		if (vma->vm == vm)
@@ -5137,9 +5192,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
 	struct i915_vma *vma;
 
-	if (!dev_priv->mm.aliasing_ppgtt ||
-	    vm == &dev_priv->mm.aliasing_ppgtt->base)
-		vm = &dev_priv->gtt.base;
+	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
 	BUG_ON(list_empty(&o->vma_list));
 
@@ -5244,14 +5297,8 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 
-	/* This WARN has probably outlived its usefulness (callers already
-	 * WARN if they don't find the GGTT vma they expect). When removing,
-	 * remember to remove the pre-check in is_pin_display() as well */
-	if (WARN_ON(list_empty(&obj->vma_list)))
-		return NULL;
-
 	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
-	if (vma->vm != obj_to_ggtt(obj))
+	if (vma->vm != i915_obj_to_ggtt(obj))
 		return NULL;
 
 	return vma;
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -96,50 +96,6 @@
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
-{
-	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *vm = &ppgtt->base;
-
-	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
-	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
-		ppgtt->base.cleanup(&ppgtt->base);
-		return;
-	}
-
-	/*
-	 * Make sure vmas are unbound before we take down the drm_mm
-	 *
-	 * FIXME: Proper refcounting should take care of this, this shouldn't be
-	 * needed at all.
-	 */
-	if (!list_empty(&vm->active_list)) {
-		struct i915_vma *vma;
-
-		list_for_each_entry(vma, &vm->active_list, mm_list)
-			if (WARN_ON(list_empty(&vma->vma_link) ||
-				    list_is_singular(&vma->vma_link)))
-				break;
-
-		i915_gem_evict_vm(&ppgtt->base, true);
-	} else {
-		i915_gem_retire_requests(dev);
-		i915_gem_evict_vm(&ppgtt->base, false);
-	}
-
-	ppgtt->base.cleanup(&ppgtt->base);
-}
-
-static void ppgtt_release(struct kref *kref)
-{
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(kref, struct i915_hw_ppgtt, ref);
-
-	do_ppgtt_cleanup(ppgtt);
-	kfree(ppgtt);
-}
-
 static size_t get_context_alignment(struct drm_device *dev)
 {
 	if (IS_GEN6(dev))
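
Note: the open-coded do_ppgtt_cleanup()/ppgtt_release() machinery removed above gives way to plain reference counting (i915_ppgtt_put(); see the i915_gem_context_free() hunk below). A rough userspace analogue of that kref-style lifetime, with hypothetical names:

#include <stdlib.h>
#include <stdatomic.h>

struct ppgtt {
	atomic_int ref;
	/* page tables, drm_mm state, ... */
};

static struct ppgtt *ppgtt_create(void)
{
	struct ppgtt *p = calloc(1, sizeof(*p));

	if (p)
		atomic_init(&p->ref, 1);	/* creator owns the first ref */
	return p;
}

static void ppgtt_get(struct ppgtt *p)
{
	atomic_fetch_add(&p->ref, 1);
}

static void ppgtt_put(struct ppgtt *p)
{
	/* Dropping the last reference frees the address space, which is
	 * what kref_put() arranges for the driver. */
	if (p && atomic_fetch_sub(&p->ref, 1) == 1)
		free(p);
}

int main(void)
{
	struct ppgtt *p = ppgtt_create();

	ppgtt_get(p);	/* e.g. a context takes a reference */
	ppgtt_put(p);	/* context released */
	ppgtt_put(p);	/* creator's reference: frees */
	return 0;
}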
@@ -179,24 +135,20 @@ static int get_context_size(struct drm_device *dev)
 void i915_gem_context_free(struct kref *ctx_ref)
 {
 	struct intel_context *ctx = container_of(ctx_ref,
						 typeof(*ctx), ref);
-	struct i915_hw_ppgtt *ppgtt = NULL;
 
-	if (ctx->legacy_hw_ctx.rcs_state) {
-		/* We refcount even the aliasing PPGTT to keep the code symmetric */
-		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
-			ppgtt = ctx_to_ppgtt(ctx);
-	}
+	if (i915.enable_execlists)
+		intel_lr_context_free(ctx);
 
-	if (ppgtt)
-		kref_put(&ppgtt->ref, ppgtt_release);
+	i915_ppgtt_put(ctx->ppgtt);
+
 	if (ctx->legacy_hw_ctx.rcs_state)
 		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	list_del(&ctx->link);
 	kfree(ctx);
 }
 
-static struct drm_i915_gem_object *
+struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 {
 	struct drm_i915_gem_object *obj;
@@ -226,29 +178,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	return obj;
 }
 
-static struct i915_hw_ppgtt *
-create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
-{
-	struct i915_hw_ppgtt *ppgtt;
-	int ret;
-
-	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-	if (!ppgtt)
-		return ERR_PTR(-ENOMEM);
-
-	ret = i915_gem_init_ppgtt(dev, ppgtt);
-	if (ret) {
-		kfree(ppgtt);
-		return ERR_PTR(ret);
-	}
-
-	ppgtt->ctx = ctx;
-	return ppgtt;
-}
-
 static struct intel_context *
 __create_hw_context(struct drm_device *dev,
 		    struct drm_i915_file_private *file_priv)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_context *ctx;
@@ -301,11 +233,9 @@ err_out:
  */
 static struct intel_context *
 i915_gem_create_context(struct drm_device *dev,
-			struct drm_i915_file_private *file_priv,
-			bool create_vm)
+			struct drm_i915_file_private *file_priv)
 {
 	const bool is_global_default_ctx = file_priv == NULL;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_context *ctx;
 	int ret = 0;
 
@@ -331,34 +261,18 @@ i915_gem_create_context(struct drm_device *dev,
 		}
 	}
 
-	if (create_vm) {
-		struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
+	if (USES_FULL_PPGTT(dev)) {
+		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
 
 		if (IS_ERR_OR_NULL(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
 			ret = PTR_ERR(ppgtt);
 			goto err_unpin;
-		} else
-			ctx->vm = &ppgtt->base;
-
-		/* This case is reserved for the global default context and
-		 * should only happen once. */
-		if (is_global_default_ctx) {
-			if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
-				ret = -EEXIST;
-				goto err_unpin;
-			}
-
-			dev_priv->mm.aliasing_ppgtt = ppgtt;
 		}
-	} else if (USES_PPGTT(dev)) {
-		/* For platforms which only have aliasing PPGTT, we fake the
-		 * address space and refcounting. */
-		ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
-		kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
-	} else
-		ctx->vm = &dev_priv->gtt.base;
+
+		ctx->ppgtt = ppgtt;
+	}
 
 	return ctx;
 
@@ -417,7 +331,11 @@ int i915_gem_context_init(struct drm_device *dev)
 	if (WARN_ON(dev_priv->ring[RCS].default_context))
 		return 0;
 
-	if (HAS_HW_CONTEXTS(dev)) {
+	if (i915.enable_execlists) {
+		/* NB: intentionally left blank. We will allocate our own
+		 * backing objects as we need them, thank you very much */
+		dev_priv->hw_context_size = 0;
+	} else if (HAS_HW_CONTEXTS(dev)) {
 		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 		if (dev_priv->hw_context_size > (1<<20)) {
 			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
@@ -426,18 +344,23 @@ int i915_gem_context_init(struct drm_device *dev)
 		}
 	}
 
-	ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+	ctx = i915_gem_create_context(dev, NULL);
 	if (IS_ERR(ctx)) {
 		DRM_ERROR("Failed to create default global context (error %ld)\n",
 			  PTR_ERR(ctx));
 		return PTR_ERR(ctx);
 	}
 
-	/* NB: RCS will hold a ref for all rings */
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		dev_priv->ring[i].default_context = ctx;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_engine_cs *ring = &dev_priv->ring[i];
 
-	DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
+		/* NB: RCS will hold a ref for all rings */
+		ring->default_context = ctx;
+	}
+
+	DRM_DEBUG_DRIVER("%s context support initialized\n",
+			i915.enable_execlists ? "LR" :
+			dev_priv->hw_context_size ? "HW" : "fake");
 	return 0;
 }
 
@@ -489,13 +412,6 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 	struct intel_engine_cs *ring;
 	int ret, i;
 
-	/* This is the only place the aliasing PPGTT gets enabled, which means
-	 * it has to happen before we bail on reset */
-	if (dev_priv->mm.aliasing_ppgtt) {
-		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-		ppgtt->enable(ppgtt);
-	}
-
 	/* FIXME: We should make this work, even in reset */
 	if (i915_reset_in_progress(&dev_priv->gpu_error))
 		return 0;
@@ -527,7 +443,7 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 	idr_init(&file_priv->context_idr);
 
 	mutex_lock(&dev->struct_mutex);
-	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+	ctx = i915_gem_create_context(dev, file_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (IS_ERR(ctx)) {
@@ -614,7 +530,6 @@ static int do_switch(struct intel_engine_cs *ring,
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct intel_context *from = ring->last_context;
-	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
 	u32 hw_flags = 0;
 	bool uninitialized = false;
 	int ret, i;
@@ -642,8 +557,8 @@ static int do_switch(struct intel_engine_cs *ring,
 	 */
 	from = ring->last_context;
 
-	if (USES_FULL_PPGTT(ring->dev)) {
-		ret = ppgtt->switch_mm(ppgtt, ring, false);
+	if (to->ppgtt) {
+		ret = to->ppgtt->switch_mm(to->ppgtt, ring, false);
 		if (ret)
 			goto unpin_out;
 	}
@@ -766,9 +681,9 @@ int i915_switch_context(struct intel_engine_cs *ring,
 	return do_switch(ring, to);
 }
 
-static bool hw_context_enabled(struct drm_device *dev)
+static bool contexts_enabled(struct drm_device *dev)
 {
-	return to_i915(dev)->hw_context_size;
+	return i915.enable_execlists || to_i915(dev)->hw_context_size;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -779,14 +694,14 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	struct intel_context *ctx;
 	int ret;
 
-	if (!hw_context_enabled(dev))
+	if (!contexts_enabled(dev))
 		return -ENODEV;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+	ctx = i915_gem_create_context(dev, file_priv);
 	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,7 @@
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
 
 #define BATCH_OFFSET_BIAS (256*1024)
@@ -94,7 +95,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
 	       struct i915_address_space *vm,
 	       struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct list_head objects;
 	int i, ret;
@@ -129,20 +129,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
 	i = 0;
 	while (!list_empty(&objects)) {
 		struct i915_vma *vma;
-		struct i915_address_space *bind_vm = vm;
-
-		if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
-		    USES_FULL_PPGTT(vm->dev)) {
-			ret = -EINVAL;
-			goto err;
-		}
-
-		/* If we have secure dispatch, or the userspace assures us that
-		 * they know what they're doing, use the GGTT VM.
-		 */
-		if (((args->flags & I915_EXEC_SECURE) &&
-		    (i == (args->buffer_count - 1))))
-			bind_vm = &dev_priv->gtt.base;
 
 		obj = list_first_entry(&objects,
 				       struct drm_i915_gem_object,
@@ -156,7 +142,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		 * from the (obj, vm) we don't run the risk of creating
 		 * duplicated vmas for the same vm.
 		 */
-		vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
 		if (IS_ERR(vma)) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
@@ -307,7 +293,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint64_t delta = reloc->delta + target_offset;
-	uint32_t __iomem *reloc_entry;
+	uint64_t offset;
 	void __iomem *reloc_page;
 	int ret;
 
@@ -320,25 +306,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 		return ret;
 
 	/* Map the page containing the relocation we're going to perform.  */
-	reloc->offset += i915_gem_obj_ggtt_offset(obj);
+	offset = i915_gem_obj_ggtt_offset(obj);
+	offset += reloc->offset;
 	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-			reloc->offset & PAGE_MASK);
-	reloc_entry = (uint32_t __iomem *)
-		(reloc_page + offset_in_page(reloc->offset));
-	iowrite32(lower_32_bits(delta), reloc_entry);
+					      offset & PAGE_MASK);
+	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
 	if (INTEL_INFO(dev)->gen >= 8) {
-		reloc_entry += 1;
+		offset += sizeof(uint32_t);
 
-		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+		if (offset_in_page(offset) == 0) {
 			io_mapping_unmap_atomic(reloc_page);
-			reloc_page = io_mapping_map_atomic_wc(
-					dev_priv->gtt.mappable,
-					reloc->offset + sizeof(uint32_t));
-			reloc_entry = reloc_page;
+			reloc_page =
+				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+							 offset);
 		}
 
-		iowrite32(upper_32_bits(delta), reloc_entry);
+		iowrite32(upper_32_bits(delta),
+			  reloc_page + offset_in_page(offset));
 	}
 
 	io_mapping_unmap_atomic(reloc_page);
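
Note: relocate_entry_gtt() now tracks a plain cursor offset instead of a mapped pointer; on gen8+ a 64-bit relocation is two 32-bit writes, and the upper half may cross into the next aperture page, forcing a remap. The boundary arithmetic as a tiny standalone example (PAGE_SIZE and the helpers are redefined locally for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))
#define offset_in_page(x) ((uint32_t)((x) & (PAGE_SIZE - 1)))

int main(void)
{
	uint64_t offset = 0x12000 - 4;	/* lower dword in the page's last slot */

	offset += sizeof(uint32_t);	/* advance to the upper dword */
	if (offset_in_page(offset) == 0)
		printf("remap: upper dword starts page 0x%llx\n",
		       (unsigned long long)(offset & PAGE_MASK));
	return 0;
}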
@@ -534,14 +519,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 	return ret;
 }
 
-static int
-need_reloc_mappable(struct i915_vma *vma)
-{
-	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
-		i915_is_ggtt(vma->vm);
-}
-
 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 				struct intel_engine_cs *ring,
@@ -549,20 +526,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-	bool need_fence;
 	uint64_t flags;
 	int ret;
 
 	flags = 0;
-	need_fence =
-		has_fenced_gpu_access &&
-		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-		obj->tiling_mode != I915_TILING_NONE;
-	if (need_fence || need_reloc_mappable(vma))
+	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
 		flags |= PIN_MAPPABLE;
 
 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 		flags |= PIN_GLOBAL;
 	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -574,17 +543,13 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
 	entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
-	if (has_fenced_gpu_access) {
-		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			ret = i915_gem_object_get_fence(obj);
-			if (ret)
-				return ret;
-
-			if (i915_gem_object_pin_fence(obj))
-				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+		ret = i915_gem_object_get_fence(obj);
+		if (ret)
+			return ret;
 
-			obj->pending_fenced_gpu_access = true;
-		}
+		if (i915_gem_object_pin_fence(obj))
+			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 	}
 
 	if (entry->offset != vma->node.start) {
@@ -601,26 +566,40 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 }
 
 static bool
-eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+need_reloc_mappable(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+
+	if (entry->relocation_count == 0)
+		return false;
+
+	if (!i915_is_ggtt(vma->vm))
+		return false;
+
+	/* See also use_cpu_reloc() */
+	if (HAS_LLC(vma->obj->base.dev))
+		return false;
+
+	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return false;
+
+	return true;
+}
+
+static bool
+eb_vma_misplaced(struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	struct drm_i915_gem_object *obj = vma->obj;
-	bool need_fence, need_mappable;
 
-	need_fence =
-		has_fenced_gpu_access &&
-		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable = need_fence || need_reloc_mappable(vma);
-
-	WARN_ON((need_mappable || need_fence) &&
+	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
 	       !i915_is_ggtt(vma->vm));
 
 	if (entry->alignment &&
 	    vma->node.start & (entry->alignment - 1))
 		return true;
 
-	if (need_mappable && !obj->map_and_fenceable)
+	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
 		return true;
 
 	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
@@ -642,9 +621,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	int retry;
 
-	if (list_empty(vmas))
-		return 0;
-
 	i915_gem_retire_requests_ring(ring);
 
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -658,20 +634,21 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 		obj = vma->obj;
 		entry = vma->exec_entry;
 
+		if (!has_fenced_gpu_access)
+			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 		need_fence =
-			has_fenced_gpu_access &&
 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 			obj->tiling_mode != I915_TILING_NONE;
 		need_mappable = need_fence || need_reloc_mappable(vma);
 
-		if (need_mappable)
+		if (need_mappable) {
+			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
 			list_move(&vma->exec_list, &ordered_vmas);
-		else
+		} else
 			list_move_tail(&vma->exec_list, &ordered_vmas);
 
 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 		obj->base.pending_write_domain = 0;
-		obj->pending_fenced_gpu_access = false;
 	}
 	list_splice(&ordered_vmas, vmas);
 
@@ -696,7 +673,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			if (eb_vma_misplaced(vma, has_fenced_gpu_access))
+			if (eb_vma_misplaced(vma))
 				ret = i915_vma_unbind(vma);
 			else
 				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -744,9 +721,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	int i, total, ret;
 	unsigned count = args->buffer_count;
 
-	if (WARN_ON(list_empty(&eb->vmas)))
-		return 0;
-
 	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 
 	/* We may process another execbuffer during the unlock... */
@@ -890,18 +864,24 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 }
 
 static int
-validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+validate_exec_list(struct drm_device *dev,
+		   struct drm_i915_gem_exec_object2 *exec,
 		   int count)
 {
-	int i;
 	unsigned relocs_total = 0;
 	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+	unsigned invalid_flags;
+	int i;
+
+	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
+	if (USES_FULL_PPGTT(dev))
+		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
 
 	for (i = 0; i < count; i++) {
 		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
 		int length; /* limited by fault_in_pages_readable() */
 
-		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
+		if (exec[i].flags & invalid_flags)
 			return -EINVAL;
 
 		/* First check for malicious input causing overflow in
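
Note: validate_exec_list() now computes a per-device invalid-flags mask up front, so EXEC_OBJECT_NEEDS_GTT is rejected under full PPGTT at validation time rather than deep inside eb_lookup_vmas(). A compact sketch of mask-based flag validation (constants here are illustrative, not the uapi values):

#include <stdbool.h>
#include <stdint.h>

#define EXEC_OBJECT_NEEDS_FENCE (1u << 0)
#define EXEC_OBJECT_NEEDS_GTT   (1u << 1)
#define KNOWN_FLAGS		(EXEC_OBJECT_NEEDS_FENCE | EXEC_OBJECT_NEEDS_GTT)

static bool exec_flags_valid(uint32_t flags, bool full_ppgtt)
{
	uint32_t invalid = ~KNOWN_FLAGS;	/* unknown bits always reject */

	if (full_ppgtt)
		invalid |= EXEC_OBJECT_NEEDS_GTT;

	return (flags & invalid) == 0;
}

int main(void)
{
	/* NEEDS_GTT is fine without full PPGTT, rejected with it */
	return exec_flags_valid(EXEC_OBJECT_NEEDS_GTT, true) ? 1 : 0;
}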
@@ -951,16 +931,26 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 		return ERR_PTR(-EIO);
 	}
 
+	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
+		int ret = intel_lr_context_deferred_create(ctx, ring);
+		if (ret) {
+			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
+			return ERR_PTR(ret);
+		}
+	}
+
 	return ctx;
 }
 
-static void
+void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct intel_engine_cs *ring)
 {
+	u32 seqno = intel_ring_get_seqno(ring);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 		struct drm_i915_gem_object *obj = vma->obj;
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
@@ -969,24 +959,31 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (obj->base.write_domain == 0)
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
-		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
 		i915_vma_move_to_active(vma, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
-			obj->last_write_seqno = intel_ring_get_seqno(ring);
+			obj->last_write_seqno = seqno;
 
 			intel_fb_obj_invalidate(obj, ring);
 
 			/* update for the implicit flush after a batch */
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 		}
+		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+			obj->last_fenced_seqno = seqno;
+			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+				struct drm_i915_private *dev_priv = to_i915(ring->dev);
+				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+					       &dev_priv->mm.fence_list);
+			}
+		}
 
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
 
-static void
+void
 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 				    struct drm_file *file,
 				    struct intel_engine_cs *ring,
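
Note: with fenced_gpu_access gone, the fence-register LRU bump that previously lived in i915_gem_object_move_to_active() happens here, keyed purely off the execbuffer entry flags. The underlying operation is list_move_tail() on a circular list with a sentinel head; a self-contained miniature:

#include <stddef.h>

struct node {
	struct node *prev, *next;
};

/* Unlink @n and append it just before @head, i.e. mark it
 * most-recently-used, like list_move_tail() on the fence LRU above. */
static void move_tail(struct node *n, struct node *head)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;

	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct node head, a, b;

	head.prev = &b; head.next = &a;
	a.prev = &head; a.next = &b;
	b.prev = &a;    b.next = &head;

	move_tail(&a, &head);	/* a becomes most-recently-used */
	return head.prev != &a;	/* exit 0 on success */
}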
@@ -1026,14 +1023,14 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }
 
-static int
-legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
+int
+i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 			     struct intel_engine_cs *ring,
 			     struct intel_context *ctx,
 			     struct drm_i915_gem_execbuffer2 *args,
 			     struct list_head *vmas,
 			     struct drm_i915_gem_object *batch_obj,
 			     u64 exec_start, u32 flags)
 {
 	struct drm_clip_rect *cliprects = NULL;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1254,7 +1251,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (!i915_gem_check_execbuffer(args))
 		return -EINVAL;
 
-	ret = validate_exec_list(exec, args->buffer_count);
+	ret = validate_exec_list(dev, exec, args->buffer_count);
 	if (ret)
 		return ret;
 
@@ -1318,8 +1315,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	i915_gem_context_reference(ctx);
 
-	vm = ctx->vm;
-	if (!USES_FULL_PPGTT(dev))
+	if (ctx->ppgtt)
+		vm = &ctx->ppgtt->base;
+	else
 		vm = &dev_priv->gtt.base;
 
 	eb = eb_create(args);
@@ -1386,25 +1384,36 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
-	if (flags & I915_DISPATCH_SECURE &&
-	    !batch_obj->has_global_gtt_mapping) {
-		/* When we have multiple VMs, we'll need to make sure that we
-		 * allocate space first */
-		struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
-		BUG_ON(!vma);
-		vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
-	}
+	if (flags & I915_DISPATCH_SECURE) {
+		/*
+		 * So on first glance it looks freaky that we pin the batch here
+		 * outside of the reservation loop. But:
+		 * - The batch is already pinned into the relevant ppgtt, so we
+		 *   already have the backing storage fully allocated.
+		 * - No other BO uses the global gtt (well contexts, but meh),
+		 *   so we don't really have issues with mutliple objects not
+		 *   fitting due to fragmentation.
+		 * So this is actually safe.
+		 */
+		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
+		if (ret)
+			goto err;
 
-	if (flags & I915_DISPATCH_SECURE)
 		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
-	else
+	} else
 		exec_start += i915_gem_obj_offset(batch_obj, vm);
 
-	ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
-			args, &eb->vmas, batch_obj, exec_start, flags);
-	if (ret)
-		goto err;
+	ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
+			&eb->vmas, batch_obj, exec_start, flags);
+
+	/*
+	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
+	 * batch vma for correctness. For less ugly and less fragility this
+	 * needs to be adjusted to also track the ggtt batch vma properly as
+	 * active.
+	 */
+	if (flags & I915_DISPATCH_SECURE)
+		i915_gem_object_ggtt_unpin(batch_obj);
 err:
 	/* the request owns the ref now */
 	i915_gem_context_unreference(ctx);
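
Note: secure batches are now explicitly pinned into the global GTT before submission and unpinned immediately after dev_priv->gt.do_execbuf() returns; the request's active tracking, not the pin, keeps the pages resident while the GPU runs. The acquire/release bracket, sketched with stand-in helpers:

#include <stdio.h>

struct batch { int pinned; };

static int pin_into_ggtt(struct batch *b)    { b->pinned = 1; return 0; }
static void unpin_from_ggtt(struct batch *b) { b->pinned = 0; }
static int do_execbuf(struct batch *b)       { (void)b; return 0; }

/* Pin, submit, unpin -- the unpin is safe even right after submission
 * because active tracking keeps the backing pages alive. */
static int submit_secure(struct batch *b)
{
	int ret = pin_into_ggtt(b);

	if (ret)
		return ret;

	ret = do_execbuf(b);
	unpin_from_ggtt(b);
	return ret;
}

int main(void)
{
	struct batch b = { 0 };

	return submit_secure(&b);
}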
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -67,7 +67,6 @@ static void ppgtt_bind_vma(struct i915_vma *vma,
 			   enum i915_cache_level cache_level,
 			   u32 flags);
 static void ppgtt_unbind_vma(struct i915_vma *vma);
-static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
 
 static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
 					     enum i915_cache_level level,
@@ -392,9 +391,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
 
-	list_del(&vm->global_link);
-	drm_mm_takedown(&vm->mm);
-
 	gen8_ppgtt_unmap_pages(ppgtt);
 	gen8_ppgtt_free(ppgtt);
 }
@@ -604,7 +600,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 		kunmap_atomic(pd_vaddr);
 	}
 
-	ppgtt->enable = gen8_ppgtt_enable;
 	ppgtt->switch_mm = gen8_mm_switch;
 	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
 	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
@@ -825,39 +820,26 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	return 0;
 }
 
-static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_enable(struct drm_device *dev)
 {
-	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int j, ret;
+	int j;
+
+	/* In the case of execlists, PPGTT is enabled by the context descriptor
+	 * and the PDPs are contained within the context itself. We don't
+	 * need to do anything here. */
+	if (i915.enable_execlists)
+		return;
 
 	for_each_ring(ring, dev_priv, j) {
 		I915_WRITE(RING_MODE_GEN7(ring),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
-		/* We promise to do a switch later with FULL PPGTT. If this is
-		 * aliasing, this is the one and only switch we'll do */
-		if (USES_FULL_PPGTT(dev))
-			continue;
-
-		ret = ppgtt->switch_mm(ppgtt, ring, true);
-		if (ret)
-			goto err_out;
 	}
-
-	return 0;
-
-err_out:
-	for_each_ring(ring, dev_priv, j)
-		I915_WRITE(RING_MODE_GEN7(ring),
-			   _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
-	return ret;
 }
 
-static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+static void gen7_ppgtt_enable(struct drm_device *dev)
 {
-	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	uint32_t ecochk, ecobits;
@@ -876,31 +858,16 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 	I915_WRITE(GAM_ECOCHK, ecochk);
 
 	for_each_ring(ring, dev_priv, i) {
-		int ret;
 		/* GFX_MODE is per-ring on gen7+ */
 		I915_WRITE(RING_MODE_GEN7(ring),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
-		/* We promise to do a switch later with FULL PPGTT. If this is
-		 * aliasing, this is the one and only switch we'll do */
-		if (USES_FULL_PPGTT(dev))
-			continue;
-
-		ret = ppgtt->switch_mm(ppgtt, ring, true);
-		if (ret)
-			return ret;
 	}
-
-	return 0;
 }
 
-static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_enable(struct drm_device *dev)
 {
-	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
 	uint32_t ecochk, gab_ctl, ecobits;
-	int i;
 
 	ecobits = I915_READ(GAC_ECO_BITS);
 	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
@@ -913,14 +880,6 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
 
 	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
-	for_each_ring(ring, dev_priv, i) {
-		int ret = ppgtt->switch_mm(ppgtt, ring, true);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
 }
 
 /* PPGTT support for Sandybdrige/Gen6 and later */
@@ -1018,8 +977,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
 
-	list_del(&vm->global_link);
-	drm_mm_takedown(&ppgtt->base.mm);
 	drm_mm_remove_node(&ppgtt->node);
 
 	gen6_ppgtt_unmap_pages(ppgtt);
@@ -1140,13 +1097,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 
 	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
 	if (IS_GEN6(dev)) {
-		ppgtt->enable = gen6_ppgtt_enable;
 		ppgtt->switch_mm = gen6_mm_switch;
 	} else if (IS_HASWELL(dev)) {
-		ppgtt->enable = gen7_ppgtt_enable;
 		ppgtt->switch_mm = hsw_mm_switch;
 	} else if (IS_GEN7(dev)) {
-		ppgtt->enable = gen7_ppgtt_enable;
 		ppgtt->switch_mm = gen7_mm_switch;
 	} else
 		BUG();
@@ -1177,39 +1131,108 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 			 ppgtt->node.size >> 20,
 			 ppgtt->node.start / PAGE_SIZE);
 
+	gen6_write_pdes(ppgtt);
+	DRM_DEBUG("Adding PPGTT at offset %x\n",
+		  ppgtt->pd_offset << 10);
+
 	return 0;
 }
 
-int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
 
 	ppgtt->base.dev = dev;
 	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 
 	if (INTEL_INFO(dev)->gen < 8)
-		ret = gen6_ppgtt_init(ppgtt);
+		return gen6_ppgtt_init(ppgtt);
 	else if (IS_GEN8(dev))
-		ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
+		return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
 	else
 		BUG();
+}
+
+int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
 
-	if (!ret) {
-		struct drm_i915_private *dev_priv = dev->dev_private;
+	ret = __hw_ppgtt_init(dev, ppgtt);
+	if (ret == 0) {
 		kref_init(&ppgtt->ref);
 		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
 			    ppgtt->base.total);
 		i915_init_vm(dev_priv, &ppgtt->base);
-		if (INTEL_INFO(dev)->gen < 8) {
-			gen6_write_pdes(ppgtt);
-			DRM_DEBUG("Adding PPGTT at offset %x\n",
-				  ppgtt->pd_offset << 10);
-		}
 	}
 
 	return ret;
 }
+
+int i915_ppgtt_init_hw(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	int i, ret = 0;
+
+	if (!USES_PPGTT(dev))
+		return 0;
+
+	if (IS_GEN6(dev))
+		gen6_ppgtt_enable(dev);
+	else if (IS_GEN7(dev))
+		gen7_ppgtt_enable(dev);
+	else if (INTEL_INFO(dev)->gen >= 8)
|
||||||
|
gen8_ppgtt_enable(dev);
|
||||||
|
else
|
||||||
|
WARN_ON(1);
|
||||||
|
|
||||||
|
if (ppgtt) {
|
||||||
|
for_each_ring(ring, dev_priv, i) {
|
||||||
|
ret = ppgtt->switch_mm(ppgtt, ring, true);
|
||||||
|
if (ret != 0)
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
struct i915_hw_ppgtt *
|
||||||
|
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
|
||||||
|
{
|
||||||
|
struct i915_hw_ppgtt *ppgtt;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
|
||||||
|
if (!ppgtt)
|
||||||
|
return ERR_PTR(-ENOMEM);
|
||||||
|
|
||||||
|
ret = i915_ppgtt_init(dev, ppgtt);
|
||||||
|
if (ret) {
|
||||||
|
kfree(ppgtt);
|
||||||
|
return ERR_PTR(ret);
|
||||||
|
}
|
||||||
|
|
||||||
|
ppgtt->file_priv = fpriv;
|
||||||
|
|
||||||
|
return ppgtt;
|
||||||
|
}
|
||||||
|
|
||||||
|
void i915_ppgtt_release(struct kref *kref)
|
||||||
|
{
|
||||||
|
struct i915_hw_ppgtt *ppgtt =
|
||||||
|
container_of(kref, struct i915_hw_ppgtt, ref);
|
||||||
|
|
||||||
|
/* vmas should already be unbound */
|
||||||
|
WARN_ON(!list_empty(&ppgtt->base.active_list));
|
||||||
|
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
|
||||||
|
|
||||||
|
list_del(&ppgtt->base.global_link);
|
||||||
|
drm_mm_takedown(&ppgtt->base.mm);
|
||||||
|
|
||||||
|
ppgtt->base.cleanup(&ppgtt->base);
|
||||||
|
kfree(ppgtt);
|
||||||
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
ppgtt_bind_vma(struct i915_vma *vma,
|
ppgtt_bind_vma(struct i915_vma *vma,
|
||||||
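The hunks above replace the old per-ppgtt enable path with a reference-counted PPGTT lifetime: i915_ppgtt_create() hands out an object holding one reference, each additional user takes another, and the kref release callback tears the address space down when the last reference is dropped. A minimal userspace sketch of that lifetime rule, with illustrative names standing in for the kernel's kref API:

/*
 * Sketch only: "ppgtt_like" and its helpers are made-up analogues of
 * i915_ppgtt_create()/i915_ppgtt_get()/i915_ppgtt_put(), not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct ppgtt_like {
	int refcount;          /* stands in for struct kref */
	void (*release)(struct ppgtt_like *);
};

static void ppgtt_release(struct ppgtt_like *p)
{
	printf("releasing ppgtt %p\n", (void *)p);
	free(p);
}

static struct ppgtt_like *ppgtt_create(void)
{
	struct ppgtt_like *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->refcount = 1;       /* like kref_init() */
	p->release = ppgtt_release;
	return p;
}

static void ppgtt_get(struct ppgtt_like *p)
{
	if (p)                 /* the kernel helpers also tolerate NULL */
		p->refcount++;
}

static void ppgtt_put(struct ppgtt_like *p)
{
	if (p && --p->refcount == 0)
		p->release(p);
}

int main(void)
{
	struct ppgtt_like *p = ppgtt_create();

	ppgtt_get(p);          /* e.g. a new VMA takes a reference */
	ppgtt_put(p);          /* VMA unbound */
	ppgtt_put(p);          /* last reference: release runs */
	return 0;
}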
@@ -1664,10 +1687,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
 	}
 }
 
-void i915_gem_setup_global_gtt(struct drm_device *dev,
+int i915_gem_setup_global_gtt(struct drm_device *dev,
 			       unsigned long start,
 			       unsigned long mappable_end,
 			       unsigned long end)
 {
 	/* Let GEM Manage all of the aperture.
 	 *
@@ -1683,6 +1706,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 	struct drm_mm_node *entry;
 	struct drm_i915_gem_object *obj;
 	unsigned long hole_start, hole_end;
+	int ret;
 
 	BUG_ON(mappable_end > end);
 
@@ -1694,14 +1718,16 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
-		int ret;
+
 		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
 			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
 
 		WARN_ON(i915_gem_obj_ggtt_bound(obj));
 		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
-		if (ret)
-			DRM_DEBUG_KMS("Reservation failed\n");
+		if (ret) {
+			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
+			return ret;
+		}
 		obj->has_global_gtt_mapping = 1;
 	}
 
@@ -1718,6 +1744,22 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 
 	/* And finally clear the reserved guard page */
 	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+
+	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
+		struct i915_hw_ppgtt *ppgtt;
+
+		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+		if (!ppgtt)
+			return -ENOMEM;
+
+		ret = __hw_ppgtt_init(dev, ppgtt);
+		if (ret != 0)
+			return ret;
+
+		dev_priv->mm.aliasing_ppgtt = ppgtt;
+	}
+
+	return 0;
 }
 
 void i915_gem_init_global_gtt(struct drm_device *dev)
@@ -1731,6 +1773,25 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
 	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }
 
+void i915_global_gtt_cleanup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
+
+	if (dev_priv->mm.aliasing_ppgtt) {
+		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+		ppgtt->base.cleanup(&ppgtt->base);
+	}
+
+	if (drm_mm_initialized(&vm->mm)) {
+		drm_mm_takedown(&vm->mm);
+		list_del(&vm->global_link);
+	}
+
+	vm->cleanup(vm);
+}
+
 static int setup_scratch_page(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1999,10 +2060,6 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
 
 	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
-	if (drm_mm_initialized(&vm->mm)) {
-		drm_mm_takedown(&vm->mm);
-		list_del(&vm->global_link);
-	}
 	iounmap(gtt->gsm);
 	teardown_scratch_page(vm->dev);
 }
@@ -2035,10 +2092,6 @@ static int i915_gmch_probe(struct drm_device *dev,
 
 static void i915_gmch_remove(struct i915_address_space *vm)
 {
-	if (drm_mm_initialized(&vm->mm)) {
-		drm_mm_takedown(&vm->mm);
-		list_del(&vm->global_link);
-	}
 	intel_gmch_remove();
 }
 
@@ -2153,5 +2206,8 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 	if (!vma)
 		vma = __i915_gem_vma_create(obj, vm);
 
+	if (!i915_is_ggtt(vm))
+		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+
 	return vma;
 }
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
 #ifndef __I915_GEM_GTT_H__
 #define __I915_GEM_GTT_H__
 
+struct drm_i915_file_private;
+
 typedef uint32_t gen6_gtt_pte_t;
 typedef uint64_t gen8_gtt_pte_t;
 typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
@@ -258,7 +260,7 @@ struct i915_hw_ppgtt {
 		dma_addr_t *gen8_pt_dma_addr[4];
 	};
 
-	struct intel_context *ctx;
+	struct drm_i915_file_private *file_priv;
 
 	int (*enable)(struct i915_hw_ppgtt *ppgtt);
 	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
@@ -269,10 +271,26 @@ struct i915_hw_ppgtt {
 
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_init_global_gtt(struct drm_device *dev);
-void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
 			       unsigned long mappable_end, unsigned long end);
+void i915_global_gtt_cleanup(struct drm_device *dev);
 
-int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+int i915_ppgtt_init_hw(struct drm_device *dev);
+void i915_ppgtt_release(struct kref *kref);
+struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
+					struct drm_i915_file_private *fpriv);
+static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
+{
+	if (ppgtt)
+		kref_get(&ppgtt->ref);
+}
+static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
+{
+	if (ppgtt)
+		kref_put(&ppgtt->ref, i915_ppgtt_release);
+}
 
 void i915_check_and_clear_faults(struct drm_device *dev);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -376,7 +376,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 
 	if (ret == 0) {
 		obj->fence_dirty =
-			obj->fenced_gpu_access ||
+			obj->last_fenced_seqno ||
 			obj->fence_reg != I915_FENCE_REG_NONE;
 
 		obj->tiling_mode = args->tiling_mode;
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -192,10 +192,10 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 				struct drm_i915_error_buffer *err,
 				int count)
 {
-	err_printf(m, "%s [%d]:\n", name, count);
+	err_printf(m, " %s [%d]:\n", name, count);
 
 	while (count--) {
 		err_printf(m, " %08x %8u %02x %02x %x %x",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
@@ -393,15 +393,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		i915_ring_error_state(m, dev, &error->ring[i]);
 	}
 
-	if (error->active_bo)
+	for (i = 0; i < error->vm_count; i++) {
+		err_printf(m, "vm[%d]\n", i);
+
 		print_error_buffers(m, "Active",
-				    error->active_bo[0],
-				    error->active_bo_count[0]);
+				    error->active_bo[i],
+				    error->active_bo_count[i]);
 
-	if (error->pinned_bo)
 		print_error_buffers(m, "Pinned",
-				    error->pinned_bo[0],
-				    error->pinned_bo_count[0]);
+				    error->pinned_bo[i],
+				    error->pinned_bo_count[i]);
+	}
 
 	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
 		obj = error->ring[i].batchbuffer;
@@ -644,13 +646,15 @@ unwind:
 			    (src)->base.size>>PAGE_SHIFT)
 
 static void capture_bo(struct drm_i915_error_buffer *err,
-		       struct drm_i915_gem_object *obj)
+		       struct i915_vma *vma)
 {
+	struct drm_i915_gem_object *obj = vma->obj;
+
 	err->size = obj->base.size;
 	err->name = obj->base.name;
 	err->rseqno = obj->last_read_seqno;
 	err->wseqno = obj->last_write_seqno;
-	err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
+	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
 	err->fence_reg = obj->fence_reg;
@@ -674,7 +678,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
 	int i = 0;
 
 	list_for_each_entry(vma, head, mm_list) {
-		capture_bo(err++, vma->obj);
+		capture_bo(err++, vma);
 		if (++i == count)
 			break;
 	}
@@ -683,21 +687,27 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
 }
 
 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
-			     int count, struct list_head *head)
+			     int count, struct list_head *head,
+			     struct i915_address_space *vm)
 {
 	struct drm_i915_gem_object *obj;
-	int i = 0;
+	struct drm_i915_error_buffer * const first = err;
+	struct drm_i915_error_buffer * const last = err + count;
 
 	list_for_each_entry(obj, head, global_list) {
-		if (!i915_gem_obj_is_pinned(obj))
-			continue;
+		struct i915_vma *vma;
 
-		capture_bo(err++, obj);
-		if (++i == count)
+		if (err == last)
 			break;
+
+		list_for_each_entry(vma, &obj->vma_list, vma_link)
+			if (vma->vm == vm && vma->pin_count > 0) {
+				capture_bo(err++, vma);
+				break;
+			}
 	}
 
-	return i;
+	return err - first;
 }
 
 /* Generate a semi-unique error code. The code is not meant to have meaning, The
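The rewritten capture_pinned_bo() above drops the index counter in favour of first/last pointer sentinels into the output array: the loop stops once the write cursor reaches last, and the number of records written falls out of pointer arithmetic. A standalone sketch of the same idiom (the even-number filter stands in for the per-VM pin_count test):

#include <stdio.h>

static size_t fill_bounded(int *out, size_t count, const int *src, size_t n)
{
	int * const first = out;
	int * const last = out + count;	/* one past the end of the array */
	size_t i;

	for (i = 0; i < n; i++) {
		if (out == last)	/* output full: stop early */
			break;
		if (src[i] % 2 == 0)	/* stand-in for the pin_count test */
			*out++ = src[i];
	}

	return out - first;		/* entries actually written */
}

int main(void)
{
	int src[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	int dst[3];
	size_t n = fill_bounded(dst, 3, src, 8);

	printf("wrote %zu entries\n", n);	/* 3: capped by the bound */
	return 0;
}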
@@ -967,6 +977,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
 
 		request = i915_gem_find_active_request(ring);
 		if (request) {
+			struct i915_address_space *vm;
+
+			vm = request->ctx && request->ctx->ppgtt ?
+				&request->ctx->ppgtt->base :
+				&dev_priv->gtt.base;
+
 			/* We need to copy these to an anonymous buffer
 			 * as the simplest method to avoid being overwritten
 			 * by userspace.
@@ -974,9 +990,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 			error->ring[i].batchbuffer =
 				i915_error_object_create(dev_priv,
 							 request->batch_obj,
-							 request->ctx ?
-							 request->ctx->vm :
-							 &dev_priv->gtt.base);
+							 vm);
 
 			if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
 			    ring->scratch.obj)
@@ -1049,9 +1063,14 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 	list_for_each_entry(vma, &vm->active_list, mm_list)
 		i++;
 	error->active_bo_count[ndx] = i;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-		if (i915_gem_obj_is_pinned(obj))
-			i++;
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		list_for_each_entry(vma, &obj->vma_list, vma_link)
+			if (vma->vm == vm && vma->pin_count > 0) {
+				i++;
+				break;
+			}
+	}
 	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
 
 	if (i) {
@@ -1070,7 +1089,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 		error->pinned_bo_count[ndx] =
 			capture_pinned_bo(pinned_bo,
 					  error->pinned_bo_count[ndx],
-					  &dev_priv->mm.bound_list);
+					  &dev_priv->mm.bound_list, vm);
 	error->active_bo[ndx] = active_bo;
 	error->pinned_bo[ndx] = pinned_bo;
 }
@@ -1091,8 +1110,25 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
 	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
 					 GFP_ATOMIC);
 
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-		i915_gem_capture_vm(dev_priv, error, vm, i++);
+	if (error->active_bo == NULL ||
+	    error->pinned_bo == NULL ||
+	    error->active_bo_count == NULL ||
+	    error->pinned_bo_count == NULL) {
+		kfree(error->active_bo);
+		kfree(error->active_bo_count);
+		kfree(error->pinned_bo);
+		kfree(error->pinned_bo_count);
+
+		error->active_bo = NULL;
+		error->active_bo_count = NULL;
+		error->pinned_bo = NULL;
+		error->pinned_bo_count = NULL;
+	} else {
+		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+			i915_gem_capture_vm(dev_priv, error, vm, i++);
+
+		error->vm_count = cnt;
+	}
 }
 
 /* Capture all registers which don't fit into another category. */
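The NULL checks added to i915_gem_capture_buffers() make the four parallel capture arrays all-or-nothing: if any kcalloc() fails, everything is freed and zeroed so later readers see vm_count == 0 rather than a half-valid state, and kfree() on NULL being a no-op keeps that error path simple. A userspace sketch of the pattern (structure and names are illustrative, not the driver's):

#include <stdlib.h>

struct capture {
	int *active, *pinned;
	int *active_count, *pinned_count;
	int vm_count;
};

static int capture_alloc(struct capture *c, int cnt)
{
	c->active = calloc(cnt, sizeof(*c->active));
	c->pinned = calloc(cnt, sizeof(*c->pinned));
	c->active_count = calloc(cnt, sizeof(*c->active_count));
	c->pinned_count = calloc(cnt, sizeof(*c->pinned_count));

	if (!c->active || !c->pinned ||
	    !c->active_count || !c->pinned_count) {
		free(c->active);		/* free(NULL) is a no-op */
		free(c->pinned);
		free(c->active_count);
		free(c->pinned_count);
		c->active = c->pinned = NULL;
		c->active_count = c->pinned_count = NULL;
		c->vm_count = 0;		/* readers see "nothing captured" */
		return -1;
	}

	c->vm_count = cnt;
	return 0;
}

int main(void)
{
	struct capture c = { 0 };

	/* success-path frees omitted for brevity */
	return capture_alloc(&c, 4) ? 1 : 0;
}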
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1322,10 +1322,10 @@ static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
  * @dev_priv: DRM device private
  *
  */
-static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
 {
 	u32 residency_C0_up = 0, residency_C0_down = 0;
-	u8 new_delay, adj;
+	int new_delay, adj;
 
 	dev_priv->rps.ei_interrupt_count++;
 
@@ -1627,6 +1627,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 				       struct drm_i915_private *dev_priv,
 				       u32 master_ctl)
 {
+	struct intel_engine_cs *ring;
 	u32 rcs, bcs, vcs;
 	uint32_t tmp = 0;
 	irqreturn_t ret = IRQ_NONE;
@@ -1636,12 +1637,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 		if (tmp) {
 			I915_WRITE(GEN8_GT_IIR(0), tmp);
 			ret = IRQ_HANDLED;
+
 			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
-			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+			ring = &dev_priv->ring[RCS];
 			if (rcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[RCS]);
+				notify_ring(dev, ring);
+			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
+				intel_execlists_handle_ctx_events(ring);
+
+			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+			ring = &dev_priv->ring[BCS];
 			if (bcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[BCS]);
+				notify_ring(dev, ring);
+			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
+				intel_execlists_handle_ctx_events(ring);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
 	}
@@ -1651,12 +1660,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 		if (tmp) {
 			I915_WRITE(GEN8_GT_IIR(1), tmp);
 			ret = IRQ_HANDLED;
+
 			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+			ring = &dev_priv->ring[VCS];
 			if (vcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[VCS]);
+				notify_ring(dev, ring);
+			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
+				intel_execlists_handle_ctx_events(ring);
 
 			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
+			ring = &dev_priv->ring[VCS2];
 			if (vcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[VCS2]);
+				notify_ring(dev, ring);
+			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
+				intel_execlists_handle_ctx_events(ring);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
 	}
@@ -1677,9 +1694,13 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 		if (tmp) {
 			I915_WRITE(GEN8_GT_IIR(3), tmp);
 			ret = IRQ_HANDLED;
+
 			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+			ring = &dev_priv->ring[VECS];
 			if (vcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[VECS]);
+				notify_ring(dev, ring);
+			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
+				intel_execlists_handle_ctx_events(ring);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
 	}
@@ -1772,7 +1793,9 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
 			long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
 		}
 
-		DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
+		DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
+				 port_name(port),
+				 long_hpd ? "long" : "short");
 		/* for long HPD pulses we want to have the digital queue happen,
 		   but we still want HPD storm detection to function. */
 		if (long_hpd) {
@@ -3781,12 +3804,17 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 	/* These are interrupts we'll toggle with the ring mask register */
 	uint32_t gt_interrupts[] = {
 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
 			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
-			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
-			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
+			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
 		0,
-		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
+		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
 		};
 
 	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
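gen8_gt_irq_handler() reads one packed IIR word per register and derives each engine's status by shifting the word down by that engine's fixed offset, then applying the same bit tests; the new GT_CONTEXT_SWITCH_INTERRUPT check simply adds a second bit per engine. A compact sketch of the decode (shift and bit values here are illustrative, not the hardware layout):

#include <stdio.h>
#include <stdint.h>

#define USER_INTERRUPT		(1u << 0)
#define CTX_SWITCH_INTERRUPT	(1u << 8)

#define RCS_SHIFT 0	/* render engine's byte within the packed word */
#define BCS_SHIFT 16	/* blitter engine's byte */

static void handle_engine(const char *name, uint32_t bits)
{
	if (bits & USER_INTERRUPT)
		printf("%s: user interrupt\n", name);
	if (bits & CTX_SWITCH_INTERRUPT)
		printf("%s: context switch event\n", name);
}

int main(void)
{
	/* pretend IIR: RCS user interrupt + BCS context-switch event */
	uint32_t iir = (USER_INTERRUPT << RCS_SHIFT) |
		       (CTX_SWITCH_INTERRUPT << BCS_SHIFT);

	handle_engine("rcs", iir >> RCS_SHIFT);
	handle_engine("bcs", iir >> BCS_SHIFT);
	return 0;
}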
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,6 +35,7 @@ struct i915_params i915 __read_mostly = {
 	.vbt_sdvo_panel_type = -1,
 	.enable_rc6 = -1,
 	.enable_fbc = -1,
+	.enable_execlists = 0,
 	.enable_hangcheck = true,
 	.enable_ppgtt = -1,
 	.enable_psr = 0,
@@ -118,6 +119,11 @@ MODULE_PARM_DESC(enable_ppgtt,
 	"Override PPGTT usage. "
 	"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
 
+module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
+MODULE_PARM_DESC(enable_execlists,
+	"Override execlists usage. "
+	"(-1=auto, 0=disabled [default], 1=enabled)");
+
 module_param_named(enable_psr, i915.enable_psr, int, 0600);
 MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
 
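The enable_execlists knob is wired up with the same module_param_named()/MODULE_PARM_DESC() plumbing as the driver's other parameters; mode 0400 makes it readable but not writable through sysfs, so it can only be set at load time. A minimal out-of-tree sketch of that plumbing, buildable as a demo module (module and parameter names here are made up):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

static int enable_feature = 0;
module_param_named(enable_feature, enable_feature, int, 0400);
MODULE_PARM_DESC(enable_feature,
	"Override feature usage. "
	"(-1=auto, 0=disabled [default], 1=enabled)");

static int __init demo_init(void)
{
	/* the value can be overridden with demo.enable_feature=1 at load */
	pr_info("demo: enable_feature=%d\n", enable_feature);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");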
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -272,6 +272,7 @@
 #define MI_SEMAPHORE_POLL (1<<15)
 #define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
 #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
+#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
 #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
 #define MI_STORE_DWORD_INDEX_SHIFT 2
@@ -282,6 +283,7 @@
  * address/value pairs. Don't overdue it, though, x <= 2^4 must hold!
  */
 #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
+#define MI_LRI_FORCE_POSTED (1<<12)
 #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
 #define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
 #define MI_SRM_LRM_GLOBAL_GTT (1<<22)
@@ -1085,6 +1087,7 @@ enum punit_power_well {
 #define RING_ACTHD_UDW(base) ((base)+0x5c)
 #define RING_NOPID(base) ((base)+0x94)
 #define RING_IMR(base) ((base)+0xa8)
+#define RING_HWSTAM(base) ((base)+0x98)
 #define RING_TIMESTAMP(base) ((base)+0x358)
 #define TAIL_ADDR 0x001FFFF8
 #define HEAD_WRAP_COUNT 0xFFE00000
@@ -1401,6 +1404,7 @@ enum punit_power_well {
 #define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
 #define GT_BSD_USER_INTERRUPT (1 << 12)
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
+#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8)
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
 #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
 #define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
@@ -4124,7 +4128,8 @@ enum punit_power_well {
 /* Old style CUR*CNTR flags (desktop 8xx) */
 #define CURSOR_ENABLE 0x80000000
 #define CURSOR_GAMMA_ENABLE 0x40000000
-#define CURSOR_STRIDE_MASK 0x30000000
+#define CURSOR_STRIDE_SHIFT 28
+#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
 #define CURSOR_PIPE_CSC_ENABLE (1<<24)
 #define CURSOR_FORMAT_SHIFT 24
 #define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
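The new CURSOR_STRIDE() macro packs a byte stride of 256, 512, 1024 or 2048 into the two bits at CURSOR_STRIDE_SHIFT: ffs() of those strides gives 9 through 12, so ffs(x) - 9 yields field values 0 through 3. A quick standalone check of the arithmetic:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define CURSOR_STRIDE_SHIFT 28
#define CURSOR_STRIDE(x) (((unsigned)ffs(x) - 9) << CURSOR_STRIDE_SHIFT)

int main(void)
{
	unsigned strides[] = { 256, 512, 1024, 2048 };
	int i;

	for (i = 0; i < 4; i++)	/* prints field values 0, 1, 2, 3 */
		printf("stride %4u -> field %u\n", strides[i],
		       CURSOR_STRIDE(strides[i]) >> CURSOR_STRIDE_SHIFT);
	return 0;
}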
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -169,14 +169,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations_dp = bdw_ddi_translations_dp;
 		ddi_translations_edp = bdw_ddi_translations_edp;
 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi) / 2;
 		hdmi_800mV_0dB = 7;
 	} else if (IS_HASWELL(dev)) {
 		ddi_translations_fdi = hsw_ddi_translations_fdi;
 		ddi_translations_dp = hsw_ddi_translations_dp;
 		ddi_translations_edp = hsw_ddi_translations_dp;
 		ddi_translations_hdmi = hsw_ddi_translations_hdmi;
-		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi) / 2;
 		hdmi_800mV_0dB = 6;
 	} else {
 		WARN(1, "ddi translation table missing\n");
@@ -184,7 +184,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi) / 2;
 		hdmi_800mV_0dB = 7;
 	}
 
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1797,7 +1797,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
 	pll->on = true;
 }
 
-void intel_disable_shared_dpll(struct intel_crtc *crtc)
+static void intel_disable_shared_dpll(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2082,35 +2082,28 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
 
 /**
  * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
- * @dev_priv: i915 private structure
- * @plane: plane to enable
- * @pipe: pipe being fed
+ * @plane: plane to be enabled
+ * @crtc: crtc for the plane
  *
- * Enable @plane on @pipe, making sure that @pipe is running first.
+ * Enable @plane on @crtc, making sure that the pipe is running first.
  */
-static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
-					  enum plane plane, enum pipe pipe)
+static void intel_enable_primary_hw_plane(struct drm_plane *plane,
+					  struct drm_crtc *crtc)
 {
-	struct drm_device *dev = dev_priv->dev;
-	struct intel_crtc *intel_crtc =
-		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-	int reg;
-	u32 val;
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 	/* If the pipe isn't enabled, we can't pump pixels and may hang */
-	assert_pipe_enabled(dev_priv, pipe);
+	assert_pipe_enabled(dev_priv, intel_crtc->pipe);
 
 	if (intel_crtc->primary_enabled)
 		return;
 
 	intel_crtc->primary_enabled = true;
 
-	reg = DSPCNTR(plane);
-	val = I915_READ(reg);
-	WARN_ON(val & DISPLAY_PLANE_ENABLE);
-
-	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
-	intel_flush_primary_plane(dev_priv, plane);
+	dev_priv->display.update_primary_plane(crtc, plane->fb,
+					       crtc->x, crtc->y);
 
 	/*
 	 * BDW signals flip done immediately if the plane
@@ -2123,31 +2116,27 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
 
 /**
  * intel_disable_primary_hw_plane - disable the primary hardware plane
- * @dev_priv: i915 private structure
- * @plane: plane to disable
- * @pipe: pipe consuming the data
+ * @plane: plane to be disabled
+ * @crtc: crtc for the plane
  *
- * Disable @plane; should be an independent operation.
+ * Disable @plane on @crtc, making sure that the pipe is running first.
  */
-static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
-					   enum plane plane, enum pipe pipe)
+static void intel_disable_primary_hw_plane(struct drm_plane *plane,
+					   struct drm_crtc *crtc)
 {
-	struct intel_crtc *intel_crtc =
-		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-	int reg;
-	u32 val;
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	assert_pipe_enabled(dev_priv, intel_crtc->pipe);
 
 	if (!intel_crtc->primary_enabled)
 		return;
 
 	intel_crtc->primary_enabled = false;
 
-	reg = DSPCNTR(plane);
-	val = I915_READ(reg);
-	WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
-
-	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
-	intel_flush_primary_plane(dev_priv, plane);
+	dev_priv->display.update_primary_plane(crtc, plane->fb,
+					       crtc->x, crtc->y);
 }
 
 static bool need_vtd_wa(struct drm_device *dev)
@@ -2388,12 +2377,35 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
 	int plane = intel_crtc->plane;
 	unsigned long linear_offset;
 	u32 dspcntr;
-	u32 reg;
+	u32 reg = DSPCNTR(plane);
+
+	if (!intel_crtc->primary_enabled) {
+		I915_WRITE(reg, 0);
+		if (INTEL_INFO(dev)->gen >= 4)
+			I915_WRITE(DSPSURF(plane), 0);
+		else
+			I915_WRITE(DSPADDR(plane), 0);
+		POSTING_READ(reg);
+		return;
+	}
+
+	dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+	dspcntr |= DISPLAY_PLANE_ENABLE;
+
+	if (INTEL_INFO(dev)->gen < 4) {
+		if (intel_crtc->pipe == PIPE_B)
+			dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+		/* pipesrc and dspsize control the size that is scaled from,
+		 * which should always be the user's requested size.
+		 */
+		I915_WRITE(DSPSIZE(plane),
+			   ((intel_crtc->config.pipe_src_h - 1) << 16) |
+			   (intel_crtc->config.pipe_src_w - 1));
+		I915_WRITE(DSPPOS(plane), 0);
+	}
 
-	reg = DSPCNTR(plane);
-	dspcntr = I915_READ(reg);
-	/* Mask out pixel format bits in case we change it */
-	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_C8:
 		dspcntr |= DISPPLANE_8BPP;
@@ -2425,12 +2437,9 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
 		BUG();
 	}
 
-	if (INTEL_INFO(dev)->gen >= 4) {
-		if (obj->tiling_mode != I915_TILING_NONE)
-			dspcntr |= DISPPLANE_TILED;
-		else
-			dspcntr &= ~DISPPLANE_TILED;
-	}
+	if (INTEL_INFO(dev)->gen >= 4 &&
+	    obj->tiling_mode != I915_TILING_NONE)
+		dspcntr |= DISPPLANE_TILED;
 
 	if (IS_G4X(dev))
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -2474,12 +2483,22 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
 	int plane = intel_crtc->plane;
 	unsigned long linear_offset;
 	u32 dspcntr;
-	u32 reg;
+	u32 reg = DSPCNTR(plane);
+
+	if (!intel_crtc->primary_enabled) {
+		I915_WRITE(reg, 0);
+		I915_WRITE(DSPSURF(plane), 0);
+		POSTING_READ(reg);
+		return;
+	}
+
+	dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+	dspcntr |= DISPLAY_PLANE_ENABLE;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
 
-	reg = DSPCNTR(plane);
-	dspcntr = I915_READ(reg);
-	/* Mask out pixel format bits in case we change it */
-	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_C8:
 		dspcntr |= DISPPLANE_8BPP;
@@ -2509,12 +2528,8 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
 
 	if (obj->tiling_mode != I915_TILING_NONE)
 		dspcntr |= DISPPLANE_TILED;
-	else
-		dspcntr &= ~DISPPLANE_TILED;
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
-	else
+	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
 	I915_WRITE(reg, dspcntr);
@@ -3873,14 +3888,12 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
 
 	drm_vblank_on(dev, pipe);
 
-	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
+	intel_enable_primary_hw_plane(crtc->primary, crtc);
 	intel_enable_planes(crtc);
 	intel_crtc_update_cursor(crtc, true);
 	intel_crtc_dpms_overlay(intel_crtc, true);
@@ -3917,7 +3930,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
 	intel_crtc_dpms_overlay(intel_crtc, false);
 	intel_crtc_update_cursor(crtc, false);
 	intel_disable_planes(crtc);
-	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+	intel_disable_primary_hw_plane(crtc->primary, crtc);
 
 	/*
 	 * FIXME: Once we grow proper nuclear flip support out of this we need
@@ -3936,7 +3949,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	enum plane plane = intel_crtc->plane;
 
 	WARN_ON(!crtc->enabled);
 
@@ -3958,13 +3970,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
 	ironlake_set_pipeconf(crtc);
 
-	/* Set up the display plane register */
-	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
-	POSTING_READ(DSPCNTR(plane));
-
-	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-					       crtc->x, crtc->y);
-
 	intel_crtc->active = true;
 
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4049,7 +4054,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	enum plane plane = intel_crtc->plane;
 
 	WARN_ON(!crtc->enabled);
 
@@ -4073,13 +4077,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
 	intel_set_pipe_csc(crtc);
 
-	/* Set up the display plane register */
-	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
-	POSTING_READ(DSPCNTR(plane));
-
-	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-					       crtc->x, crtc->y);
-
 	intel_crtc->active = true;
 
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4628,13 +4625,10 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
 static void valleyview_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
 	bool is_dsi;
-	u32 dspcntr;
 
 	WARN_ON(!crtc->enabled);
 
@@ -4650,30 +4644,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 		vlv_prepare_pll(intel_crtc);
 	}
 
-	/* Set up the display plane register */
-	dspcntr = DISPPLANE_GAMMA_ENABLE;
-
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
 
 	intel_set_pipe_timings(intel_crtc);
 
-	/* pipesrc and dspsize control the size that is scaled from,
-	 * which should always be the user's requested size.
-	 */
-	I915_WRITE(DSPSIZE(plane),
-		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
-		   (intel_crtc->config.pipe_src_w - 1));
-	I915_WRITE(DSPPOS(plane), 0);
-
 	i9xx_set_pipeconf(intel_crtc);
 
-	I915_WRITE(DSPCNTR(plane), dspcntr);
-	POSTING_READ(DSPCNTR(plane));
-
-	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-					       crtc->x, crtc->y);
-
 	intel_crtc->active = true;
 
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4721,12 +4698,9 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
 static void i9xx_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-	u32 dspcntr;
 
 	WARN_ON(!crtc->enabled);
 
@@ -4735,35 +4709,13 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
 	i9xx_set_pll_dividers(intel_crtc);
 
-	/* Set up the display plane register */
-	dspcntr = DISPPLANE_GAMMA_ENABLE;
-
-	if (pipe == 0)
-		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
-	else
-		dspcntr |= DISPPLANE_SEL_PIPE_B;
-
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
 
 	intel_set_pipe_timings(intel_crtc);
 
-	/* pipesrc and dspsize control the size that is scaled from,
-	 * which should always be the user's requested size.
-	 */
-	I915_WRITE(DSPSIZE(plane),
-		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
-		   (intel_crtc->config.pipe_src_w - 1));
-	I915_WRITE(DSPPOS(plane), 0);
-
 	i9xx_set_pipeconf(intel_crtc);
 
-	I915_WRITE(DSPCNTR(plane), dspcntr);
-	POSTING_READ(DSPCNTR(plane));
-
-	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-					       crtc->x, crtc->y);
-
 	intel_crtc->active = true;
 
 	if (!IS_GEN2(dev))
@@ -8095,28 +8047,54 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t cntl;
+	uint32_t cntl = 0, size = 0;
 
-	if (base != intel_crtc->cursor_base) {
-		/* On these chipsets we can only modify the base whilst
-		 * the cursor is disabled.
-		 */
-		if (intel_crtc->cursor_cntl) {
-			I915_WRITE(_CURACNTR, 0);
-			POSTING_READ(_CURACNTR);
-			intel_crtc->cursor_cntl = 0;
+	if (base) {
+		unsigned int width = intel_crtc->cursor_width;
+		unsigned int height = intel_crtc->cursor_height;
+		unsigned int stride = roundup_pow_of_two(width) * 4;
+
+		switch (stride) {
+		default:
+			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
+				  width, stride);
+			stride = 256;
+			/* fallthrough */
+		case 256:
+		case 512:
+		case 1024:
+		case 2048:
+			break;
 		}
 
-		I915_WRITE(_CURABASE, base);
-		POSTING_READ(_CURABASE);
+		cntl |= CURSOR_ENABLE |
+			CURSOR_GAMMA_ENABLE |
+			CURSOR_FORMAT_ARGB |
+			CURSOR_STRIDE(stride);
+
+		size = (height << 12) | width;
+	}
+
+	if (intel_crtc->cursor_cntl != 0 &&
+	    (intel_crtc->cursor_base != base ||
+	     intel_crtc->cursor_size != size ||
+	     intel_crtc->cursor_cntl != cntl)) {
+		/* On these chipsets we can only modify the base/size/stride
+		 * whilst the cursor is disabled.
+		 */
+		I915_WRITE(_CURACNTR, 0);
+		POSTING_READ(_CURACNTR);
+		intel_crtc->cursor_cntl = 0;
+	}
+
+	if (intel_crtc->cursor_base != base)
+		I915_WRITE(_CURABASE, base);
+
+	if (intel_crtc->cursor_size != size) {
+		I915_WRITE(CURSIZE, size);
+		intel_crtc->cursor_size = size;
 	}
 
-	/* XXX width must be 64, stride 256 => 0x00 << 28 */
-	cntl = 0;
-	if (base)
-		cntl = (CURSOR_ENABLE |
-			CURSOR_GAMMA_ENABLE |
-			CURSOR_FORMAT_ARGB);
 	if (intel_crtc->cursor_cntl != cntl) {
 		I915_WRITE(_CURACNTR, cntl);
 		POSTING_READ(_CURACNTR);
@@ -8151,43 +8129,6 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
 		}
 		cntl |= pipe << 28; /* Connect to correct pipe */
 	}
-	if (intel_crtc->cursor_cntl != cntl) {
-		I915_WRITE(CURCNTR(pipe), cntl);
-		POSTING_READ(CURCNTR(pipe));
-		intel_crtc->cursor_cntl = cntl;
-	}
-
-	/* and commit changes on next vblank */
-	I915_WRITE(CURBASE(pipe), base);
-	POSTING_READ(CURBASE(pipe));
-}
-
-static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	uint32_t cntl;
-
-	cntl = 0;
-	if (base) {
-		cntl = MCURSOR_GAMMA_ENABLE;
-		switch (intel_crtc->cursor_width) {
-		case 64:
-			cntl |= CURSOR_MODE_64_ARGB_AX;
-			break;
-		case 128:
-			cntl |= CURSOR_MODE_128_ARGB_AX;
-			break;
-		case 256:
-			cntl |= CURSOR_MODE_256_ARGB_AX;
-			break;
-		default:
-			WARN_ON(1);
-			return;
-		}
-	}
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		cntl |= CURSOR_PIPE_CSC_ENABLE;
 
@@ -8246,15 +8187,50 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 
 	I915_WRITE(CURPOS(pipe), pos);
 
-	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
-		ivb_update_cursor(crtc, base);
-	else if (IS_845G(dev) || IS_I865G(dev))
+	if (IS_845G(dev) || IS_I865G(dev))
 		i845_update_cursor(crtc, base);
 	else
 		i9xx_update_cursor(crtc, base);
 	intel_crtc->cursor_base = base;
 }
 
+static bool cursor_size_ok(struct drm_device *dev,
+			   uint32_t width, uint32_t height)
+{
+	if (width == 0 || height == 0)
+		return false;
+
+	/*
+	 * 845g/865g are special in that they are only limited by
+	 * the width of their cursors, the height is arbitrary up to
+	 * the precision of the register. Everything else requires
+	 * square cursors, limited to a few power-of-two sizes.
+	 */
+	if (IS_845G(dev) || IS_I865G(dev)) {
+		if ((width & 63) != 0)
+			return false;
+
+		if (width > (IS_845G(dev) ? 64 : 512))
+			return false;
+
+		if (height > 1023)
+			return false;
+	} else {
+		switch (width | height) {
+		case 256:
+		case 128:
+			if (IS_GEN2(dev))
+				return false;
+		case 64:
+			break;
+		default:
+			return false;
+		}
+	}
+
+	return true;
+}
+
/*
|
/*
|
||||||
* intel_crtc_cursor_set_obj - Set cursor to specified GEM object
|
* intel_crtc_cursor_set_obj - Set cursor to specified GEM object
|
||||||
*
|
*
|
||||||
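cursor_size_ok() leans on a small trick: since the non-845 paths only ever accept square cursors of power-of-two size, switch (width | height) matches a case only when both dimensions equal that exact value — any mismatched pair sets extra bits and lands in default (64 | 128 == 192, for instance). A hedged standalone sketch of the property (names are illustrative, not from the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the switch (width | height) trick: for nonzero values,
     * width | height equals 64, 128 or 256 only when width == height ==
     * that size, so one OR checks squareness and the allowed
     * power-of-two sizes at once. */
    static bool square_pow2_cursor(uint32_t width, uint32_t height, bool gen2)
    {
    	if (width == 0 || height == 0)
    		return false;

    	switch (width | height) {
    	case 256:
    	case 128:
    		if (gen2)
    			return false;
    		/* fallthrough */
    	case 64:
    		return true;
    	default:
    		return false;	/* e.g. 64 | 128 == 192 lands here */
    	}
    }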
@@ -8267,10 +8243,9 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
 				     uint32_t width, uint32_t height)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
-	unsigned old_width;
+	unsigned old_width, stride;
 	uint32_t addr;
 	int ret;
 
@@ -8284,14 +8259,13 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
 	}
 
 	/* Check for which cursor types we support */
-	if (!((width == 64 && height == 64) ||
-	      (width == 128 && height == 128 && !IS_GEN2(dev)) ||
-	      (width == 256 && height == 256 && !IS_GEN2(dev)))) {
+	if (!cursor_size_ok(dev, width, height)) {
 		DRM_DEBUG("Cursor dimension not supported\n");
 		return -EINVAL;
 	}
 
-	if (obj->base.size < width * height * 4) {
+	stride = roundup_pow_of_two(width) * 4;
+	if (obj->base.size < stride * height) {
 		DRM_DEBUG_KMS("buffer is too small\n");
 		ret = -ENOMEM;
 		goto fail;
@@ -8340,9 +8314,6 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
 		addr = obj->phys_handle->busaddr;
 	}
 
-	if (IS_GEN2(dev))
-		I915_WRITE(CURSIZE, (height << 12) | width);
-
 finish:
 	if (intel_crtc->cursor_bo) {
 		if (!INTEL_INFO(dev)->cursor_needs_physical)
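The new size check charges for the padded stride rather than width * 4: on an 865G, a 192x300 cursor rounds its 768-byte rows up to a 1024-byte stride, so the backing object must hold at least 307200 bytes, not 230400. A small standalone sketch of the arithmetic (helper names are hypothetical; roundup_pow_of_two_u32 mirrors the kernel's roundup_pow_of_two() from <linux/log2.h>):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the kernel's roundup_pow_of_two(). */
    static uint32_t roundup_pow_of_two_u32(uint32_t n)
    {
    	uint32_t p = 1;
    	while (p < n)
    		p <<= 1;
    	return p;
    }

    /* Minimum GEM object size for a width x height ARGB cursor,
     * matching the stride * height check in intel_crtc_cursor_set_obj(). */
    static uint64_t min_cursor_bo_size(uint32_t width, uint32_t height)
    {
    	uint64_t stride = roundup_pow_of_two_u32(width) * 4; /* bytes per row */
    	return stride * height;
    }

    int main(void)
    {
    	/* 192-pixel rows pad to 256 pixels -> 1024-byte stride ->
    	 * 307200 bytes minimum for a 192x300 cursor. */
    	printf("%llu\n", (unsigned long long)min_cursor_bo_size(192, 300));
    	return 0;
    }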
@@ -9577,6 +9548,8 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
 		return false;
 	else if (i915.use_mmio_flip > 0)
 		return true;
+	else if (i915.enable_execlists)
+		return true;
 	else
 		return ring != obj->ring;
 }
@@ -11380,7 +11353,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 		ret = intel_set_mode(set->crtc, set->mode,
 				     set->x, set->y, set->fb);
 	} else if (config->fb_changed) {
-		struct drm_i915_private *dev_priv = dev->dev_private;
 		struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
 
 		intel_crtc_wait_for_pending_flips(set->crtc);
@@ -11394,8 +11366,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 		 */
 		if (!intel_crtc->primary_enabled && ret == 0) {
 			WARN_ON(!intel_crtc->active);
-			intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
-						      intel_crtc->pipe);
+			intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
 		}
 
 		/*
@@ -11548,8 +11519,6 @@ static int
 intel_primary_plane_disable(struct drm_plane *plane)
 {
 	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct intel_crtc *intel_crtc;
 
 	if (!plane->fb)
@@ -11572,8 +11541,8 @@ intel_primary_plane_disable(struct drm_plane *plane)
 		goto disable_unpin;
 
 	intel_crtc_wait_for_pending_flips(plane->crtc);
-	intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
-				       intel_plane->pipe);
+	intel_disable_primary_hw_plane(plane, plane->crtc);
 
 disable_unpin:
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
@@ -11593,9 +11562,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
 			     uint32_t src_w, uint32_t src_h)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
 	struct drm_rect dest = {
@@ -11682,9 +11649,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
 				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
 
 		if (intel_crtc->primary_enabled)
-			intel_disable_primary_hw_plane(dev_priv,
-						       intel_plane->plane,
-						       intel_plane->pipe);
+			intel_disable_primary_hw_plane(plane, crtc);
 
 		if (plane->fb != fb)
@@ -11701,8 +11666,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
 		return ret;
 
 	if (!intel_crtc->primary_enabled)
-		intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
-					      intel_crtc->pipe);
+		intel_enable_primary_hw_plane(plane, crtc);
 
 	return 0;
 }
@@ -11811,6 +11775,10 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 		return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
 	} else {
 		intel_crtc_update_cursor(crtc, visible);
+
+		intel_frontbuffer_flip(crtc->dev,
+				       INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
+
 		return 0;
 	}
 }
@@ -11887,6 +11855,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 
 	intel_crtc->cursor_base = ~0;
 	intel_crtc->cursor_cntl = ~0;
+	intel_crtc->cursor_size = ~0;
 
 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
 	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
@@ -12404,29 +12373,27 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.get_display_clock_speed =
 			i830_get_display_clock_speed;
 
-	if (HAS_PCH_SPLIT(dev)) {
-		if (IS_GEN5(dev)) {
-			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
-			dev_priv->display.write_eld = ironlake_write_eld;
-		} else if (IS_GEN6(dev)) {
-			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
-			dev_priv->display.write_eld = ironlake_write_eld;
-			dev_priv->display.modeset_global_resources =
-				snb_modeset_global_resources;
-		} else if (IS_IVYBRIDGE(dev)) {
-			/* FIXME: detect B0+ stepping and use auto training */
-			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
-			dev_priv->display.write_eld = ironlake_write_eld;
-			dev_priv->display.modeset_global_resources =
-				ivb_modeset_global_resources;
-		} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
-			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
-			dev_priv->display.write_eld = haswell_write_eld;
-			dev_priv->display.modeset_global_resources =
-				haswell_modeset_global_resources;
-		}
-	} else if (IS_G4X(dev)) {
+	if (IS_G4X(dev)) {
 		dev_priv->display.write_eld = g4x_write_eld;
+	} else if (IS_GEN5(dev)) {
+		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+		dev_priv->display.write_eld = ironlake_write_eld;
+	} else if (IS_GEN6(dev)) {
+		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+		dev_priv->display.write_eld = ironlake_write_eld;
+		dev_priv->display.modeset_global_resources =
+			snb_modeset_global_resources;
+	} else if (IS_IVYBRIDGE(dev)) {
+		/* FIXME: detect B0+ stepping and use auto training */
+		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+		dev_priv->display.write_eld = ironlake_write_eld;
+		dev_priv->display.modeset_global_resources =
+			ivb_modeset_global_resources;
+	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
+		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+		dev_priv->display.write_eld = haswell_write_eld;
+		dev_priv->display.modeset_global_resources =
+			haswell_modeset_global_resources;
 	} else if (IS_VALLEYVIEW(dev)) {
 		dev_priv->display.modeset_global_resources =
 			valleyview_modeset_global_resources;
@@ -12677,7 +12644,10 @@ void intel_modeset_init(struct drm_device *dev)
 		dev->mode_config.max_height = 8192;
 	}
 
-	if (IS_GEN2(dev)) {
+	if (IS_845G(dev) || IS_I865G(dev)) {
+		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
+		dev->mode_config.cursor_height = 1023;
+	} else if (IS_GEN2(dev)) {
 		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
 		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
 	} else {
drivers/gpu/drm/i915/intel_dp.c
@@ -4059,7 +4059,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
-	DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
+	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
+		      port_name(intel_dig_port->port),
 		      long_hpd ? "long" : "short");
 
 	power_domain = intel_display_port_power_domain(intel_encoder);
drivers/gpu/drm/i915/intel_drv.h
@@ -411,6 +411,7 @@ struct intel_crtc {
 	uint32_t cursor_addr;
 	int16_t cursor_width, cursor_height;
 	uint32_t cursor_cntl;
+	uint32_t cursor_size;
 	uint32_t cursor_base;
 
 	struct intel_plane_config plane_config;
@@ -952,7 +953,7 @@ void intel_dvo_init(struct drm_device *dev);
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_initial_config(struct drm_device *dev);
 extern void intel_fbdev_fini(struct drm_device *dev);
-extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
 extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
 extern void intel_fbdev_restore_mode(struct drm_device *dev);
 #else
@@ -969,7 +970,7 @@ static inline void intel_fbdev_fini(struct drm_device *dev)
 {
 }
 
-static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
 {
 }
 
drivers/gpu/drm/i915/intel_fbdev.c
@@ -26,6 +26,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/console.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/mm.h>
@@ -636,6 +637,15 @@ out:
 	return false;
 }
 
+static void intel_fbdev_suspend_worker(struct work_struct *work)
+{
+	intel_fbdev_set_suspend(container_of(work,
+					     struct drm_i915_private,
+					     fbdev_suspend_work)->dev,
+				FBINFO_STATE_RUNNING,
+				true);
+}
+
 int intel_fbdev_init(struct drm_device *dev)
 {
 	struct intel_fbdev *ifbdev;
@@ -662,6 +672,8 @@ int intel_fbdev_init(struct drm_device *dev)
 	}
 
 	dev_priv->fbdev = ifbdev;
+	INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
+
 	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
 
 	return 0;
@@ -682,12 +694,14 @@ void intel_fbdev_fini(struct drm_device *dev)
 	if (!dev_priv->fbdev)
 		return;
 
+	flush_work(&dev_priv->fbdev_suspend_work);
+
 	intel_fbdev_destroy(dev, dev_priv->fbdev);
 	kfree(dev_priv->fbdev);
 	dev_priv->fbdev = NULL;
 }
 
-void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_fbdev *ifbdev = dev_priv->fbdev;
@@ -698,6 +712,33 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
 
 	info = ifbdev->helper.fbdev;
 
+	if (synchronous) {
+		/* Flush any pending work to turn the console on, and then
+		 * wait to turn it off. It must be synchronous as we are
+		 * about to suspend or unload the driver.
+		 *
+		 * Note that from within the work-handler, we cannot flush
+		 * ourselves, so only flush outstanding work upon suspend!
+		 */
+		if (state != FBINFO_STATE_RUNNING)
+			flush_work(&dev_priv->fbdev_suspend_work);
+		console_lock();
+	} else {
+		/*
+		 * The console lock can be pretty contented on resume due
+		 * to all the printk activity. Try to keep it out of the hot
+		 * path of resume if possible.
+		 */
+		WARN_ON(state != FBINFO_STATE_RUNNING);
+		if (!console_trylock()) {
+			/* Don't block our own workqueue as this can
+			 * be run in parallel with other i915.ko tasks.
+			 */
+			schedule_work(&dev_priv->fbdev_suspend_work);
+			return;
+		}
+	}
+
 	/* On resume from hibernation: If the object is shmemfs backed, it has
 	 * been restored from swap. If the object is stolen however, it will be
 	 * full of whatever garbage was left in there.
@@ -706,6 +747,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
 	memset_io(info->screen_base, 0, info->screen_size);
 
 	fb_set_suspend(info, state);
+	console_unlock();
 }
 
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
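The else branch above is a classic trylock-or-defer shape: the resume hot path takes the console lock only opportunistically, and on contention hands the whole update to fbdev_suspend_work instead of blocking. A user-space C sketch of the same pattern, assuming only POSIX threads (all names here are illustrative, not i915 API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;

    static void do_console_update(void)
    {
    	printf("console updated\n");
    }

    /* Deferred path: a worker retakes the lock with a blocking acquire,
     * so the hot path never waits on it. */
    static void *update_worker(void *arg)
    {
    	(void)arg;
    	pthread_mutex_lock(&console_lock);
    	do_console_update();
    	pthread_mutex_unlock(&console_lock);
    	return NULL;
    }

    /* Hot path: mirror of intel_fbdev_set_suspend()'s resume case --
     * trylock, and on contention punt to the worker instead of blocking. */
    static void console_update_async(void)
    {
    	if (pthread_mutex_trylock(&console_lock) == 0) {
    		do_console_update();
    		pthread_mutex_unlock(&console_lock);
    	} else {
    		pthread_t worker;

    		if (pthread_create(&worker, NULL, update_worker, NULL) == 0)
    			pthread_detach(worker);
    	}
    }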
drivers/gpu/drm/i915/intel_lrc.c: new file, 1697 lines (diff suppressed because it is too large)
drivers/gpu/drm/i915/intel_lrc.h: new file, 112 lines
drivers/gpu/drm/i915/intel_lrc.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_LRC_H_
+#define _INTEL_LRC_H_
+
+/* Execlists regs */
+#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
+#define RING_EXECLIST_STATUS(ring)	((ring)->mmio_base+0x234)
+#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
+#define RING_CONTEXT_STATUS_BUF(ring)	((ring)->mmio_base+0x370)
+#define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)
+
+/* Logical Rings */
+void intel_logical_ring_stop(struct intel_engine_cs *ring);
+void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
+int intel_logical_rings_init(struct drm_device *dev);
+
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
+void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
+/**
+ * intel_logical_ring_advance() - advance the ringbuffer tail
+ * @ringbuf: Ringbuffer to advance.
+ *
+ * The tail is only updated in our logical ringbuffer struct.
+ */
+static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
+{
+	ringbuf->tail &= ringbuf->size - 1;
+}
+/**
+ * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
+ * @ringbuf: Ringbuffer to write to.
+ * @data: DWORD to write.
+ */
+static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
+					   u32 data)
+{
+	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
+	ringbuf->tail += 4;
+}
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
+
+/* Logical Ring Contexts */
+void intel_lr_context_free(struct intel_context *ctx);
+int intel_lr_context_deferred_create(struct intel_context *ctx,
+				     struct intel_engine_cs *ring);
+
+/* Execlists */
+int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
+			       struct intel_engine_cs *ring,
+			       struct intel_context *ctx,
+			       struct drm_i915_gem_execbuffer2 *args,
+			       struct list_head *vmas,
+			       struct drm_i915_gem_object *batch_obj,
+			       u64 exec_start, u32 flags);
+u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
+
+/**
+ * struct intel_ctx_submit_request - queued context submission request
+ * @ctx: Context to submit to the ELSP.
+ * @ring: Engine to submit it to.
+ * @tail: how far in the context's ringbuffer this request goes to.
+ * @execlist_link: link in the submission queue.
+ * @work: workqueue for processing this request in a bottom half.
+ * @elsp_submitted: no. of times this request has been sent to the ELSP.
+ *
+ * The ELSP only accepts two elements at a time, so we queue context/tail
+ * pairs on a given queue (ring->execlist_queue) until the hardware is
+ * available. The queue serves a double purpose: we also use it to keep track
+ * of the up to 2 contexts currently in the hardware (usually one in execution
+ * and the other queued up by the GPU): We only remove elements from the head
+ * of the queue when the hardware informs us that an element has been
+ * completed.
+ *
+ * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
+ */
+struct intel_ctx_submit_request {
+	struct intel_context *ctx;
+	struct intel_engine_cs *ring;
+	u32 tail;
+
+	struct list_head execlist_link;
+	struct work_struct work;
+
+	int elsp_submitted;
+};
+
+void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+
+#endif /* _INTEL_LRC_H_ */
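The three emission helpers mirror the legacy intel_ring_begin()/intel_ring_emit()/intel_ring_advance() idiom, just operating on a logical ringbuffer. A hedged sketch of a caller (the wrapping function is invented for illustration; MI_NOOP stands in for real commands, and error handling is trimmed to the essentials):

    /* Sketch only: how a driver-internal caller would use the emission
     * helpers declared above (signatures from this header). */
    static int emit_two_noops(struct intel_ringbuffer *ringbuf)
    {
    	int ret;

    	/* Reserve space for two DWORDs (may wait for the GPU to drain). */
    	ret = intel_logical_ring_begin(ringbuf, 2);
    	if (ret)
    		return ret;

    	intel_logical_ring_emit(ringbuf, MI_NOOP);
    	intel_logical_ring_emit(ringbuf, MI_NOOP);

    	/* Wrap the software tail; the hardware tail pointer is only
    	 * written by intel_logical_ring_advance_and_submit(). */
    	intel_logical_ring_advance(ringbuf);

    	return 0;
    }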
drivers/gpu/drm/i915/intel_pm.c
@@ -3719,7 +3719,6 @@ static void gen6_enable_rps(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	u32 rp_state_cap;
-	u32 gt_perf_status;
 	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
 	u32 gtfifodbg;
 	int rc6_mode;
@@ -3744,7 +3743,6 @@ static void gen6_enable_rps(struct drm_device *dev)
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
 	parse_rp_state_cap(dev_priv, rp_state_cap);
 
drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,14 +33,24 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
- * but keeps the logic simple. Indeed, the whole purpose of this macro is just
- * to give some inclination as to some of the magic values used in the various
- * workarounds!
- */
-#define CACHELINE_BYTES 64
+bool
+intel_ring_initialized(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
 
-static inline int __ring_space(int head, int tail, int size)
+	if (!dev)
+		return false;
+
+	if (i915.enable_execlists) {
+		struct intel_context *dctx = ring->default_context;
+		struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
+
+		return ringbuf->obj;
+	} else
+		return ring->buffer && ring->buffer->obj;
+}
+
+int __intel_ring_space(int head, int tail, int size)
 {
 	int space = head - (tail + I915_RING_FREE_SPACE);
 	if (space < 0)
@@ -48,12 +58,13 @@ static inline int __ring_space(int head, int tail, int size)
 	return space;
 }
 
-static inline int ring_space(struct intel_ringbuffer *ringbuf)
+int intel_ring_space(struct intel_ringbuffer *ringbuf)
 {
-	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
+	return __intel_ring_space(ringbuf->head & HEAD_ADDR,
+				  ringbuf->tail, ringbuf->size);
 }
 
-static bool intel_ring_stopped(struct intel_engine_cs *ring)
+bool intel_ring_stopped(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
@@ -478,7 +489,12 @@ static bool stop_ring(struct intel_engine_cs *ring)
 		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
 		if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
 			DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
-			return false;
+			/* Sometimes we observe that the idle flag is not
+			 * set even though the ring is empty. So double
+			 * check before giving up.
+			 */
+			if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
+				return false;
 		}
 	}
 
@@ -563,7 +579,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	else {
 		ringbuf->head = I915_READ_HEAD(ring);
 		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ringbuf->space = ring_space(ringbuf);
+		ringbuf->space = intel_ring_space(ringbuf);
 		ringbuf->last_retired_head = -1;
 	}
 
@@ -575,8 +591,25 @@ out:
 	return ret;
 }
 
-static int
-init_pipe_control(struct intel_engine_cs *ring)
+void
+intel_fini_pipe_control(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	if (ring->scratch.obj == NULL)
+		return;
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		kunmap(sg_page(ring->scratch.obj->pages->sgl));
+		i915_gem_object_ggtt_unpin(ring->scratch.obj);
+	}
+
+	drm_gem_object_unreference(&ring->scratch.obj->base);
+	ring->scratch.obj = NULL;
+}
+
+int
+intel_init_pipe_control(struct intel_engine_cs *ring)
 {
 	int ret;
 
@@ -651,7 +684,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
 			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
 	if (INTEL_INFO(dev)->gen >= 5) {
-		ret = init_pipe_control(ring);
+		ret = intel_init_pipe_control(ring);
 		if (ret)
 			return ret;
 	}
@@ -686,16 +719,7 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
 		dev_priv->semaphore_obj = NULL;
 	}
 
-	if (ring->scratch.obj == NULL)
-		return;
-
-	if (INTEL_INFO(dev)->gen >= 5) {
-		kunmap(sg_page(ring->scratch.obj->pages->sgl));
-		i915_gem_object_ggtt_unpin(ring->scratch.obj);
-	}
-
-	drm_gem_object_unreference(&ring->scratch.obj->base);
-	ring->scratch.obj = NULL;
+	intel_fini_pipe_control(ring);
 }
 
 static int gen8_rcs_signal(struct intel_engine_cs *signaller,
@@ -1514,7 +1538,7 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 	return 0;
 }
 
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
 	if (!ringbuf->obj)
 		return;
@@ -1525,8 +1549,8 @@ static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 	ringbuf->obj = NULL;
 }
 
-static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-				      struct intel_ringbuffer *ringbuf)
+int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+			       struct intel_ringbuffer *ringbuf)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
@@ -1588,7 +1612,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->execlist_queue);
 	ringbuf->size = 32 * PAGE_SIZE;
+	ringbuf->ring = ring;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1671,13 +1697,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 		ringbuf->head = ringbuf->last_retired_head;
 		ringbuf->last_retired_head = -1;
 
-		ringbuf->space = ring_space(ringbuf);
+		ringbuf->space = intel_ring_space(ringbuf);
 		if (ringbuf->space >= n)
 			return 0;
 	}
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
+		if (__intel_ring_space(request->tail, ringbuf->tail,
+				       ringbuf->size) >= n) {
 			seqno = request->seqno;
 			break;
 		}
@@ -1694,7 +1721,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	ringbuf->head = ringbuf->last_retired_head;
 	ringbuf->last_retired_head = -1;
 
-	ringbuf->space = ring_space(ringbuf);
+	ringbuf->space = intel_ring_space(ringbuf);
 	return 0;
 }
 
@@ -1723,7 +1750,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 	trace_i915_ring_wait_begin(ring);
 	do {
 		ringbuf->head = I915_READ_HEAD(ring);
-		ringbuf->space = ring_space(ringbuf);
+		ringbuf->space = intel_ring_space(ringbuf);
 		if (ringbuf->space >= n) {
 			ret = 0;
 			break;
@@ -1775,7 +1802,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 		iowrite32(MI_NOOP, virt++);
 
 	ringbuf->tail = 0;
-	ringbuf->space = ring_space(ringbuf);
+	ringbuf->space = intel_ring_space(ringbuf);
 
 	return 0;
 }
@@ -1980,9 +2007,7 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 			      u64 offset, u32 len,
 			      unsigned flags)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
-		!(flags & I915_DISPATCH_SECURE);
+	bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);
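The renamed __intel_ring_space() computes the free bytes between tail and head modulo the ring size, while keeping I915_RING_FREE_SPACE in reserve so a completely full ring can never look identical to an empty one (head == tail). A standalone worked example of the arithmetic (the reserve value of 64 mirrors this era's intel_ringbuffer.h; treat it as an assumption):

    #include <assert.h>

    #define I915_RING_FREE_SPACE 64	/* assumed reserve, per intel_ringbuffer.h */

    /* Mirror of __intel_ring_space(): free bytes between tail and head,
     * keeping I915_RING_FREE_SPACE back so full and empty are distinct. */
    static int ring_space_example(int head, int tail, int size)
    {
    	int space = head - (tail + I915_RING_FREE_SPACE);
    	if (space < 0)
    		space += size;
    	return space;
    }

    int main(void)
    {
    	/* 4 KiB ring, consumer (head) at 1024, producer (tail) at 3072:
    	 * 1024 - (3072 + 64) = -2112, +4096 = 1984 bytes usable. */
    	assert(ring_space_example(1024, 3072, 4096) == 1984);
    	return 0;
    }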
drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -5,6 +5,13 @@
 
 #define I915_CMD_HASH_ORDER 9
 
+/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some inclination as to some of the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+
 /*
  * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
  * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
@@ -90,6 +97,15 @@ struct intel_ringbuffer {
 	struct drm_i915_gem_object *obj;
 	void __iomem *virtual_start;
 
+	struct intel_engine_cs *ring;
+
+	/*
+	 * FIXME: This backpointer is an artifact of the history of how the
+	 * execlist patches came into being. It will get removed once the basic
+	 * code has landed.
+	 */
+	struct intel_context *FIXME_lrc_ctx;
+
 	u32 head;
 	u32 tail;
 	int space;
@@ -214,6 +230,18 @@ struct intel_engine_cs {
 				  unsigned int num_dwords);
 	} semaphore;
 
+	/* Execlists */
+	spinlock_t execlist_lock;
+	struct list_head execlist_queue;
+	u8 next_context_status_buffer;
+	u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
+	int (*emit_request)(struct intel_ringbuffer *ringbuf);
+	int (*emit_flush)(struct intel_ringbuffer *ringbuf,
+			  u32 invalidate_domains,
+			  u32 flush_domains);
+	int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
+			     u64 offset, unsigned flags);
+
 	/**
 	 * List of objects currently involved in rendering from the
 	 * ringbuffer.
@@ -287,11 +315,7 @@ struct intel_engine_cs {
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
 
-static inline bool
-intel_ring_initialized(struct intel_engine_cs *ring)
-{
-	return ring->buffer && ring->buffer->obj;
-}
+bool intel_ring_initialized(struct intel_engine_cs *ring);
 
 static inline unsigned
 intel_ring_flag(struct intel_engine_cs *ring)
@@ -355,6 +379,10 @@ intel_write_status_page(struct intel_engine_cs *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x30
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+			       struct intel_ringbuffer *ringbuf);
+
 void intel_stop_ring_buffer(struct intel_engine_cs *ring);
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
@@ -372,6 +400,9 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring)
 	struct intel_ringbuffer *ringbuf = ring->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
 }
+int __intel_ring_space(int head, int tail, int size);
+int intel_ring_space(struct intel_ringbuffer *ringbuf);
+bool intel_ring_stopped(struct intel_engine_cs *ring);
 void __intel_ring_advance(struct intel_engine_cs *ring);
 
 int __must_check intel_ring_idle(struct intel_engine_cs *ring);
@@ -379,6 +410,9 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
 int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
 int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
 
+void intel_fini_pipe_control(struct intel_engine_cs *ring);
+int intel_init_pipe_control(struct intel_engine_cs *ring);
+
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
 int intel_init_bsd2_ring_buffer(struct drm_device *dev);
include/drm/drm_crtc.h
@@ -1127,6 +1127,9 @@ extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
 					      struct drm_file *file_priv);
 extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
 					    struct drm_file *file_priv);
+extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+				       struct drm_property *property,
+				       uint64_t value);
 
 extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
 				 int *bpp);