Merge tag 'drm-intel-next-2015-02-14' of git://anongit.freedesktop.org/drm-intel into drm-next
- use the atomic helpers for plane_update/disable hooks (Matt Roper)
- refactor the initial plane config code (Damien)
- ppgtt prep patches for dynamic pagetable alloc (Ben Widawsky, reworked and
  rebased by a lot of other people)
- framebuffer modifier support from Tvrtko Ursulin, drm core code from Rob Clark
- piles of workaround patches for skl from Damien and Nick Hoath
- vGPU support for xengt on the client side (Yu Zhang)
- and the usual smaller things all over

* tag 'drm-intel-next-2015-02-14' of git://anongit.freedesktop.org/drm-intel: (88 commits)
  drm/i915: Update DRIVER_DATE to 20150214
  drm/i915: Remove references to previously removed UMS config option
  drm/i915/skl: Use a LRI for WaDisableDgMirrorFixInHalfSliceChicken5
  drm/i915/skl: Fix always true comparison in a revision id check
  drm/i915/skl: Implement WaEnableLbsSlaRetryTimerDecrement
  drm/i915/skl: Implement WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken
  drm/i915: Add process identifier to requests
  drm/i915/skl: Implement WaBarrierPerformanceFixDisable
  drm/i915/skl: Implement WaCcsTlbPrefetchDisable:skl
  drm/i915/skl: Implement WaDisableChickenBitTSGBarrierAckForFFSliceCS
  drm/i915/skl: Implement WaDisableHDCInvalidation
  drm/i915/skl: Implement WaDisableLSQCROPERFforOCL
  drm/i915/skl: Implement WaDisablePartialResolveInVc
  drm/i915/skl: Introduce a SKL specific init_workarounds()
  drm/i915/skl: Document that we implement WaRsClearFWBitsAtReset
  drm/i915/skl: Implement WaSetGAPSunitClckGateDisable
  drm/i915/skl: Make the init clock gating function skylake specific
  drm/i915/skl: Provide a gen9 specific init_render_ring()
  drm/i915/skl: Document the WM read latency W/A with its name
  drm/i915/skl: Also detect eDRAM on SKL
  ...
This commit is contained in: commit 7547af9186
@@ -3979,6 +3979,11 @@ int num_ioctls;</synopsis>
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
       </sect2>
+      <sect2>
+        <title>Intel GVT-g Guest Support(vGPU)</title>
+!Pdrivers/gpu/drm/i915/i915_vgpu.c Intel GVT-g guest support
+!Idrivers/gpu/drm/i915/i915_vgpu.c
+      </sect2>
     </sect1>
     <sect1>
       <title>Display Hardware Handling</title>
@@ -3272,6 +3272,12 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
 			DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
 			return -EINVAL;
 		}
+
+		if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
+			DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
+				      r->modifier[i], i);
+			return -EINVAL;
+		}
 	}
 
 	return 0;
@@ -3285,7 +3291,7 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
 	struct drm_framebuffer *fb;
 	int ret;
 
-	if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+	if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
 		DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
 		return ERR_PTR(-EINVAL);
 	}
@@ -3301,6 +3307,12 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (r->flags & DRM_MODE_FB_MODIFIERS &&
+	    !dev->mode_config.allow_fb_modifiers) {
+		DRM_DEBUG_KMS("driver does not support fb modifiers\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	ret = framebuffer_check(r);
 	if (ret)
 		return ERR_PTR(ret);
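The check above only passes once a driver has opted in to modifiers. As a minimal sketch (not part of this diff; driver name hypothetical), a KMS driver advertises support by setting allow_fb_modifiers during mode config setup:

	/* Hypothetical driver init: opt in to fb modifiers so ADDFB2
	 * requests carrying DRM_MODE_FB_MODIFIERS pass the check above. */
	static void foo_mode_config_init(struct drm_device *dev)
	{
		drm_mode_config_init(dev);
		dev->mode_config.allow_fb_modifiers = true;
	}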
@@ -837,6 +837,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
 	for (i = 0; i < 4; i++) {
 		fb->pitches[i] = mode_cmd->pitches[i];
 		fb->offsets[i] = mode_cmd->offsets[i];
+		fb->modifier[i] = mode_cmd->modifier[i];
 	}
 	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
 			     &fb->bits_per_pixel);
@@ -321,6 +321,9 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
 		else
 			req->value = 64;
 		break;
+	case DRM_CAP_ADDFB2_MODIFIERS:
+		req->value = dev->mode_config.allow_fb_modifiers;
+		break;
 	default:
 		return -EINVAL;
 	}
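Userspace can probe this capability before issuing an ADDFB2 with modifiers. A minimal libdrm sketch (assumes a /dev/dri/card0 node and headers new enough to define DRM_CAP_ADDFB2_MODIFIERS; not part of this series):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <xf86drm.h>

	int main(void)
	{
		uint64_t has_modifiers = 0;
		int fd = open("/dev/dri/card0", O_RDWR);

		if (fd < 0)
			return 1;
		if (drmGetCap(fd, DRM_CAP_ADDFB2_MODIFIERS, &has_modifiers) == 0)
			printf("ADDFB2 modifiers: %s\n", has_modifiers ? "yes" : "no");
		close(fd);
		return 0;
	}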
@@ -83,6 +83,9 @@ i915-y += dvo_ch7017.o \
 	  intel_sdvo.o \
 	  intel_tv.o
 
+# virtual gpu code
+i915-y += i915_vgpu.o
+
 # legacy horrors
 i915-y += i915_dma.o \
 	  i915_ums.o
@@ -1778,11 +1778,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 	ifbdev = dev_priv->fbdev;
 	fb = to_intel_framebuffer(ifbdev->helper.fb);
 
-	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
 		   fb->base.width,
 		   fb->base.height,
 		   fb->base.depth,
 		   fb->base.bits_per_pixel,
+		   fb->base.modifier[0],
 		   atomic_read(&fb->base.refcount.refcount));
 	describe_obj(m, fb->obj);
 	seq_putc(m, '\n');
@@ -1793,11 +1794,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 		if (ifbdev && &fb->base == ifbdev->helper.fb)
 			continue;
 
-		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
 			   fb->base.width,
 			   fb->base.height,
 			   fb->base.depth,
 			   fb->base.bits_per_pixel,
+			   fb->base.modifier[0],
 			   atomic_read(&fb->base.refcount.refcount));
 		describe_obj(m, fb->obj);
 		seq_putc(m, '\n');
@@ -4226,10 +4228,7 @@ i915_max_freq_set(void *data, u64 val)
 
 	dev_priv->rps.max_freq_softlimit = val;
 
-	if (IS_VALLEYVIEW(dev))
-		valleyview_set_rps(dev, val);
-	else
-		gen6_set_rps(dev, val);
+	intel_set_rps(dev, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -4304,10 +4303,7 @@ i915_min_freq_set(void *data, u64 val)
 
 	dev_priv->rps.min_freq_softlimit = val;
 
-	if (IS_VALLEYVIEW(dev))
-		valleyview_set_rps(dev, val);
-	else
-		gen6_set_rps(dev, val);
+	intel_set_rps(dev, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -36,6 +36,7 @@
 #include "intel_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_vgpu.h"
 #include "i915_trace.h"
 #include <linux/pci.h>
 #include <linux/console.h>
@@ -842,6 +843,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		}
 	}
 
+	/*
+	 * Notify a valid surface after modesetting,
+	 * when running inside a VM.
+	 */
+	if (intel_vgpu_active(dev))
+		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+
 	i915_setup_sysfs(dev);
 
 	if (INTEL_INFO(dev)->num_pipes) {
@@ -369,6 +369,19 @@ static const struct intel_device_info intel_skylake_info = {
 	IVB_CURSOR_OFFSETS,
 };
 
+static const struct intel_device_info intel_skylake_gt3_info = {
+	.is_preliminary = 1,
+	.is_skylake = 1,
+	.gen = 9, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
 /*
  * Make sure any device matches here are from most specific to most
  * general. For example, since the Quanta match is based on the subsystem
@@ -406,7 +419,9 @@ static const struct intel_device_info intel_skylake_info = {
 	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
 	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
 	INTEL_CHV_IDS(&intel_cherryview_info),	\
-	INTEL_SKL_IDS(&intel_skylake_info)
+	INTEL_SKL_GT1_IDS(&intel_skylake_info),	\
+	INTEL_SKL_GT2_IDS(&intel_skylake_info),	\
+	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info)	\
 
 static const struct pci_device_id pciidlist[] = {	/* aka */
 	INTEL_PCI_IDS,
@@ -1630,11 +1645,9 @@ static int __init i915_init(void)
 
 	if (!(driver.driver_features & DRIVER_MODESET)) {
 		driver.get_vblank_timestamp = NULL;
-#ifndef CONFIG_DRM_I915_UMS
 		/* Silently fail loading to not upset userspace. */
 		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
 		return 0;
-#endif
 	}
 
 	/*
@@ -1650,10 +1663,8 @@ static int __init i915_init(void)
 
 static void __exit i915_exit(void)
 {
-#ifndef CONFIG_DRM_I915_UMS
 	if (!(driver.driver_features & DRIVER_MODESET))
 		return; /* Never loaded a driver. */
-#endif
 
 	drm_pci_exit(&driver, &i915_pci_driver);
 }
@@ -31,6 +31,7 @@
 #define _I915_DRV_H_
 
 #include <uapi/drm/i915_drm.h>
+#include <uapi/drm/drm_fourcc.h>
 
 #include "i915_reg.h"
 #include "intel_bios.h"
@@ -55,7 +56,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20150130"
+#define DRIVER_DATE		"20150214"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -772,10 +773,10 @@ struct intel_context {
 };
 
 struct i915_fbc {
-	unsigned long size;
+	unsigned long uncompressed_size;
 	unsigned threshold;
 	unsigned int fb_id;
-	enum plane plane;
+	struct intel_crtc *crtc;
 	int y;
 
 	struct drm_mm_node compressed_fb;
@@ -1640,6 +1641,10 @@ struct i915_workarounds {
 	u32 count;
 };
 
+struct i915_virtual_gpu {
+	bool active;
+};
+
 struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
@@ -1652,6 +1657,8 @@ struct drm_i915_private {
 
 	struct intel_uncore uncore;
 
+	struct i915_virtual_gpu vgpu;
+
 	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
@@ -2153,6 +2160,9 @@ struct drm_i915_gem_request {
 	/** file_priv list entry for this request */
 	struct list_head client_list;
 
+	/** process identifier submitting this request */
+	struct pid *pid;
+
 	uint32_t uniq;
 
 	/**
@@ -2339,6 +2349,7 @@ struct drm_i915_cmd_table {
 })
 #define INTEL_INFO(p)	(&__I915__(p)->info)
 #define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
+#define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
 
 #define IS_I830(dev)		(INTEL_DEVID(dev) == 0x3577)
 #define IS_845G(dev)		(INTEL_DEVID(dev) == 0x2562)
@@ -2361,9 +2372,6 @@ struct drm_i915_cmd_table {
 #define IS_IVB_GT1(dev)		(INTEL_DEVID(dev) == 0x0156 || \
 				 INTEL_DEVID(dev) == 0x0152 || \
 				 INTEL_DEVID(dev) == 0x015a)
-#define IS_SNB_GT1(dev)		(INTEL_DEVID(dev) == 0x0102 || \
-				 INTEL_DEVID(dev) == 0x0106 || \
-				 INTEL_DEVID(dev) == 0x010A)
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
 #define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
@@ -2386,6 +2394,12 @@ struct drm_i915_cmd_table {
 				 INTEL_DEVID(dev) == 0x0A1E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
+#define SKL_REVID_A0		(0x0)
+#define SKL_REVID_B0		(0x1)
+#define SKL_REVID_C0		(0x2)
+#define SKL_REVID_D0		(0x3)
+#define SKL_REVID_E0		(0x4)
+
 /*
  * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
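These stepping macros pair with the INTEL_REVID() helper added earlier in this header; the SKL workaround paths gate register writes on them. An illustrative fragment of the usage pattern (a hedged sketch, not a hunk from this series; WA_CLR_BIT_MASKED comes from the i915 ring workaround infrastructure):

	/* Apply a workaround only on early Skylake steppings. */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) {
		/* WaDisableDgMirrorFixInHalfSliceChicken5:skl */
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);
	}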
@@ -2493,8 +2507,6 @@ extern int i915_max_ioctl;
 
 extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_legacy(struct drm_device *dev);
-extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
-extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
 /* i915_params.c */
 struct i915_params {
@@ -2577,6 +2589,10 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
 				enum forcewake_domains domains);
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+static inline bool intel_vgpu_active(struct drm_device *dev)
+{
+	return to_i915(dev)->vgpu.active;
+}
 
 void
 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -3182,8 +3198,7 @@ extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void valleyview_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
@@ -3196,8 +3211,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file);
 
-void intel_notify_mmio_flip(struct intel_engine_cs *ring);
-
 /* overlay */
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
@@ -29,6 +29,7 @@
 #include <drm/drm_vma_manager.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include <linux/oom.h>
@@ -2492,6 +2493,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
 		list_add_tail(&request->client_list,
 			      &file_priv->mm.request_list);
 		spin_unlock(&file_priv->mm.lock);
+
+		request->pid = get_pid(task_pid(current));
 	}
 
 	trace_i915_gem_request_add(request);
@@ -2572,6 +2575,8 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 	list_del(&request->list);
 	i915_gem_request_remove_from_client(request);
 
+	put_pid(request->pid);
+
 	i915_gem_request_unreference(request);
 }
 
@@ -4987,6 +4992,10 @@ i915_gem_load(struct drm_device *dev)
 	else
 		dev_priv->num_fence_regs = 8;
 
+	if (intel_vgpu_active(dev))
+		dev_priv->num_fence_regs =
+				I915_READ(vgtif_reg(avail_rs.fence_num));
+
 	/* Initialize fence registers to zero */
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	i915_gem_restore_fences(dev);
@@ -27,6 +27,7 @@
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 
@@ -103,6 +104,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
 	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
 
+	if (intel_vgpu_active(dev))
+		has_full_ppgtt = false; /* emulation is too hard */
+
 	/*
 	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
 	 * execlists, the sole mechanism available to submit work.
@@ -375,7 +379,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 	pt_vaddr = NULL;
 
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
-		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
 			break;
 
 		if (pt_vaddr == NULL)
@@ -486,7 +490,7 @@ bail:
 static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
 					   const int max_pdp)
 {
-	struct page **pt_pages[GEN8_LEGACY_PDPS];
+	struct page **pt_pages[GEN8_LEGACY_PDPES];
 	int i, ret;
 
 	for (i = 0; i < max_pdp; i++) {
@@ -537,7 +541,7 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 		return -ENOMEM;
 
 	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
-	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
 
 	return 0;
 }
@@ -797,6 +801,16 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	return 0;
 }
 
+static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
+			  struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+
+	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+	return 0;
+}
+
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct intel_engine_cs *ring)
 {
@@ -1032,11 +1046,14 @@ alloc:
 		goto alloc;
 	}
 
+	if (ret)
+		return ret;
+
 	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
 		DRM_DEBUG("Forced to use aperture for PDEs\n");
 
 	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
-	return ret;
+	return 0;
 }
 
 static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
@@ -1123,6 +1140,9 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	} else
 		BUG();
 
+	if (intel_vgpu_active(dev))
+		ppgtt->switch_mm = vgpu_mm_switch;
+
 	ret = gen6_ppgtt_alloc(ppgtt);
 	if (ret)
 		return ret;
@@ -1753,6 +1773,16 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 
 	/* Subtract the guard page ... */
 	drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
+
+	dev_priv->gtt.base.start = start;
+	dev_priv->gtt.base.total = end - start;
+
+	if (intel_vgpu_active(dev)) {
+		ret = intel_vgt_balloon(dev);
+		if (ret)
+			return ret;
+	}
+
 	if (!HAS_LLC(dev))
 		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
 
@@ -1772,9 +1802,6 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 		vma->bound |= GLOBAL_BIND;
 	}
 
-	dev_priv->gtt.base.start = start;
-	dev_priv->gtt.base.total = end - start;
-
 	/* Clear any non-preallocated blocks */
 	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -1826,6 +1853,9 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
 	}
 
 	if (drm_mm_initialized(&vm->mm)) {
+		if (intel_vgpu_active(dev))
+			intel_vgt_deballoon();
+
 		drm_mm_takedown(&vm->mm);
 		list_del(&vm->global_link);
 	}
@@ -88,7 +88,7 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 #define GEN8_PDE_MASK			0x1ff
 #define GEN8_PTE_SHIFT			12
 #define GEN8_PTE_MASK			0x1ff
-#define GEN8_LEGACY_PDPS		4
+#define GEN8_LEGACY_PDPES		4
 #define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
 #define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
 
@@ -273,16 +273,16 @@ struct i915_hw_ppgtt {
 	unsigned num_pd_pages; /* gen8+ */
 	union {
 		struct page **pt_pages;
-		struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
+		struct page **gen8_pt_pages[GEN8_LEGACY_PDPES];
 	};
 	struct page *pd_pages;
 	union {
 		uint32_t pd_offset;
-		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
+		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPES];
 	};
 	union {
 		dma_addr_t *pt_dma_addr;
-		dma_addr_t *gen8_pt_dma_addr[4];
+		dma_addr_t *gen8_pt_dma_addr[GEN8_LEGACY_PDPES];
 	};
 
 	struct drm_i915_file_private *file_priv;
@@ -231,7 +231,7 @@ static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
 			      dev_priv->mm.stolen_base + compressed_llb->start);
 	}
 
-	dev_priv->fbc.size = size / dev_priv->fbc.threshold;
+	dev_priv->fbc.uncompressed_size = size;
 
 	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
 		      size);
@@ -253,7 +253,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_c
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return -ENODEV;
 
-	if (size < dev_priv->fbc.size)
+	if (size < dev_priv->fbc.uncompressed_size)
 		return 0;
 
 	/* Release any current block */
@@ -266,7 +266,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->fbc.size == 0)
+	if (dev_priv->fbc.uncompressed_size == 0)
 		return;
 
 	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
@@ -276,7 +276,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
 		kfree(dev_priv->fbc.compressed_llb);
 	}
 
-	dev_priv->fbc.size = 0;
+	dev_priv->fbc.uncompressed_size = 0;
 }
 
 void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -994,12 +994,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
 				i915_error_ggtt_object_create(dev_priv,
 							      ring->scratch.obj);
 
-		if (request->file_priv) {
+		if (request->pid) {
 			struct task_struct *task;
 
 			rcu_read_lock();
-			task = pid_task(request->file_priv->file->pid,
-					PIDTYPE_PID);
+			task = pid_task(request->pid, PIDTYPE_PID);
 			if (task) {
 				strcpy(error->ring[i].comm, task->comm);
 				error->ring[i].pid = task->pid;
@@ -1243,10 +1243,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
 	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
 
-	if (IS_VALLEYVIEW(dev_priv->dev))
-		valleyview_set_rps(dev_priv->dev, new_delay);
-	else
-		gen6_set_rps(dev_priv->dev, new_delay);
+	intel_set_rps(dev_priv->dev, new_delay);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
@@ -140,6 +140,7 @@
 #define GEN8_RING_PDP_LDW(ring, n)	((ring)->mmio_base+0x270 + (n) * 8)
 
 #define GAM_ECOCHK			0x4090
+#define   BDW_DISABLE_HDC_INVALIDATION	(1<<25)
 #define   ECOCHK_SNB_BIT		(1<<10)
 #define   HSW_ECOCHK_ARB_PRIO_SOL	(1<<6)
 #define   ECOCHK_PPGTT_CACHE64B	(0x3<<3)
@@ -586,6 +587,19 @@ enum punit_power_well {
 	PUNIT_POWER_WELL_NUM,
 };
 
+enum skl_disp_power_wells {
+	SKL_DISP_PW_MISC_IO,
+	SKL_DISP_PW_DDI_A_E,
+	SKL_DISP_PW_DDI_B,
+	SKL_DISP_PW_DDI_C,
+	SKL_DISP_PW_DDI_D,
+	SKL_DISP_PW_1 = 14,
+	SKL_DISP_PW_2,
+};
+
+#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
+#define SKL_POWER_WELL_REQ(pw) (1 << (((pw) * 2) + 1))
+
 #define PUNIT_REG_PWRGT_CTRL			0x60
 #define PUNIT_REG_PWRGT_STATUS			0x61
 #define PUNIT_PWRGT_MASK(power_well)		(3 << ((power_well) * 2))
@@ -1470,6 +1484,7 @@ enum punit_power_well {
 #define CACHE_MODE_1		0x7004 /* IVB+ */
 #define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE	(1<<6)
 #define   GEN8_4x4_STC_OPTIMIZATION_DISABLE	(1<<6)
+#define   GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE	(1<<1)
 
 #define GEN6_BLITTER_ECOSKPD	0x221d0
 #define   GEN6_BLITTER_LOCK_SHIFT			16
@@ -5221,15 +5236,22 @@ enum punit_power_well {
 #define HSW_NDE_RSTWRN_OPT	0x46408
 #define  RESET_PCH_HANDSHAKE_ENABLE	(1<<4)
 
+#define FF_SLICE_CS_CHICKEN2			0x02e4
+#define  GEN9_TSG_BARRIER_ACK_DISABLE		(1<<8)
+
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1		0x7010
 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	((1<<10) | (1<<26))
+# define GEN9_RHWO_OPTIMIZATION_DISABLE		(1<<14)
 #define COMMON_SLICE_CHICKEN2			0x7014
 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE	(1<<0)
 
 #define HIZ_CHICKEN					0x7018
 # define CHV_HZ_8X8_MODE_IN_1X				(1<<15)
 
+#define GEN9_SLICE_COMMON_ECO_CHICKEN0		0x7308
+#define  DISABLE_PIXEL_MASK_CAMMING		(1<<14)
+
 #define GEN7_L3SQCREG1				0xB010
 #define  VLV_B0_WA_L3SQCREG1_VALUE		0x00D30000
 
@@ -5245,11 +5267,16 @@ enum punit_power_well {
 #define GEN7_L3SQCREG4				0xb034
 #define  L3SQ_URB_READ_CAM_MATCH_DISABLE	(1<<27)
 
+#define GEN8_L3SQCREG4				0xb118
+#define  GEN8_LQSC_RO_PERF_DIS			(1<<27)
+
 /* GEN8 chicken */
 #define HDC_CHICKEN0				0x7300
-#define  HDC_FORCE_NON_COHERENT			(1<<4)
-#define  HDC_DONOT_FETCH_MEM_WHEN_MASKED	(1<<11)
 #define  HDC_FENCE_DEST_SLM_DISABLE		(1<<14)
+#define  HDC_DONOT_FETCH_MEM_WHEN_MASKED	(1<<11)
+#define  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT	(1<<5)
+#define  HDC_FORCE_NON_COHERENT			(1<<4)
+#define  HDC_BARRIER_PERFORMANCE_DISABLE	(1<<10)
 
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG		0x9030
@@ -5258,6 +5285,9 @@ enum punit_power_well {
 #define HSW_SCRATCH1				0xb038
 #define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE	(1<<27)
 
+#define BDW_SCRATCH1					0xb11c
+#define  GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE	(1<<2)
+
 /* PCH */
 
 /* south display engine interrupt: IBX */
@@ -5980,6 +6010,7 @@ enum punit_power_well {
 #define HSW_IDICR				0x9008
 #define  IDIHASHMSK(x)				(((x) & 0x3f) << 16)
 #define HSW_EDRAM_PRESENT			0x120010
+#define  EDRAM_ENABLED				0x1
 
 #define GEN6_UCGCTL1				0x9400
 # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE		(1 << 16)
@@ -6003,6 +6034,7 @@ enum punit_power_well {
 #define GEN6_RSTCTL				0x9420
 
 #define GEN8_UCGCTL6				0x9430
+#define   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE	(1<<24)
 #define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE	(1<<14)
 
 #define GEN6_GFXPAUSE				0xA000
@@ -6185,6 +6217,7 @@ enum punit_power_well {
 
 #define GEN9_HALF_SLICE_CHICKEN5	0xe188
 #define   GEN9_DG_MIRROR_FIX_ENABLE	(1<<5)
+#define   GEN9_CCS_TLB_PREFETCH_ENABLE	(1<<3)
 
 #define GEN8_ROW_CHICKEN		0xe4f0
 #define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE	(1<<8)
@@ -6200,8 +6233,12 @@ enum punit_power_well {
 #define HALF_SLICE_CHICKEN3		0xe184
 #define   HSW_SAMPLE_C_PERFORMANCE	(1<<9)
 #define   GEN8_CENTROID_PIXEL_OPT_DIS	(1<<8)
+#define   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC	(1<<5)
 #define   GEN8_SAMPLER_POWER_BYPASS_DIS	(1<<1)
 
+#define GEN9_HALF_SLICE_CHICKEN7	0xe194
+#define   GEN9_ENABLE_YV12_BUGFIX	(1<<4)
+
 /* Audio */
 #define G4X_AUD_VID_DID			(dev_priv->info.display_mmio_offset + 0x62020)
 #define   INTEL_AUDIO_DEVCL		0x808629FB
@@ -6351,6 +6388,13 @@ enum punit_power_well {
 #define   HSW_PWR_WELL_FORCE_ON		(1<<19)
 #define HSW_PWR_WELL_CTL6			0x45414
 
+/* SKL Fuse Status */
+#define SKL_FUSE_STATUS				0x42000
+#define  SKL_FUSE_DOWNLOAD_STATUS		(1<<31)
+#define  SKL_FUSE_PG0_DIST_STATUS		(1<<27)
+#define  SKL_FUSE_PG1_DIST_STATUS		(1<<26)
+#define  SKL_FUSE_PG2_DIST_STATUS		(1<<25)
+
 /* Per-pipe DDI Function Control */
 #define TRANS_DDI_FUNC_CTL_A		0x60400
 #define TRANS_DDI_FUNC_CTL_B		0x61400
@@ -402,10 +402,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	/* We still need *_set_rps to process the new max_delay and
 	 * update the interrupt limits and PMINTRMSK even though
 	 * frequency request may be unchanged. */
-	if (IS_VALLEYVIEW(dev))
-		valleyview_set_rps(dev, val);
-	else
-		gen6_set_rps(dev, val);
+	intel_set_rps(dev, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -464,10 +461,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	/* We still need *_set_rps to process the new min_delay and
 	 * update the interrupt limits and PMINTRMSK even though
 	 * frequency request may be unchanged. */
-	if (IS_VALLEYVIEW(dev))
-		valleyview_set_rps(dev, val);
-	else
-		gen6_set_rps(dev, val);
+	intel_set_rps(dev, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -115,7 +115,7 @@ TRACE_EVENT(i915_vma_bind,
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
 			     __field(struct i915_address_space *, vm)
-			     __field(u32, offset)
+			     __field(u64, offset)
 			     __field(u32, size)
 			     __field(unsigned, flags)
 			     ),
@@ -128,7 +128,7 @@ TRACE_EVENT(i915_vma_bind,
 			   __entry->flags = flags;
 			   ),
 
-	    TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
+	    TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
 		      __entry->obj, __entry->offset, __entry->size,
 		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
 		      __entry->vm)
@@ -141,7 +141,7 @@ TRACE_EVENT(i915_vma_unbind,
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
 			     __field(struct i915_address_space *, vm)
-			     __field(u32, offset)
+			     __field(u64, offset)
 			     __field(u32, size)
 			     ),
 
@@ -152,7 +152,7 @@ TRACE_EVENT(i915_vma_unbind,
 			   __entry->size = vma->node.size;
 			   ),
 
-	    TP_printk("obj=%p, offset=%08x size=%x vm=%p",
+	    TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
 		      __entry->obj, __entry->offset, __entry->size, __entry->vm)
 );
 
drivers/gpu/drm/i915/i915_vgpu.c (new file, 264 lines)
@@ -0,0 +1,264 @@
/*
 * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "intel_drv.h"
#include "i915_vgpu.h"

/**
 * DOC: Intel GVT-g guest support
 *
 * Intel GVT-g is a graphics virtualization technology which shares the
 * GPU among multiple virtual machines on a time-sharing basis. Each
 * virtual machine is presented a virtual GPU (vGPU), which has equivalent
 * features as the underlying physical GPU (pGPU), so i915 driver can run
 * seamlessly in a virtual machine. This file provides vGPU specific
 * optimizations when running in a virtual machine, to reduce the complexity
 * of vGPU emulation and to improve the overall performance.
 *
 * A primary function introduced here is so-called "address space ballooning"
 * technique. Intel GVT-g partitions global graphics memory among multiple VMs,
 * so each VM can directly access a portion of the memory without hypervisor's
 * intervention, e.g. filling textures or queuing commands. However with the
 * partitioning an unmodified i915 driver would assume a smaller graphics
 * memory starting from address ZERO, then requires vGPU emulation module to
 * translate the graphics address between 'guest view' and 'host view', for
 * all registers and command opcodes which contain a graphics memory address.
 * To reduce the complexity, Intel GVT-g introduces "address space ballooning",
 * by telling the exact partitioning knowledge to each guest i915 driver, which
 * then reserves and prevents non-allocated portions from allocation. Thus vGPU
 * emulation module only needs to scan and validate graphics addresses without
 * complexity of address translation.
 *
 */

/**
 * i915_check_vgpu - detect virtual GPU
 * @dev: drm device *
 *
 * This function is called at the initialization stage, to detect whether
 * running on a vGPU.
 */
void i915_check_vgpu(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint64_t magic;
	uint32_t version;

	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);

	if (!IS_HASWELL(dev))
		return;

	magic = readq(dev_priv->regs + vgtif_reg(magic));
	if (magic != VGT_MAGIC)
		return;

	version = INTEL_VGT_IF_VERSION_ENCODE(
		readw(dev_priv->regs + vgtif_reg(version_major)),
		readw(dev_priv->regs + vgtif_reg(version_minor)));
	if (version != INTEL_VGT_IF_VERSION) {
		DRM_INFO("VGT interface version mismatch!\n");
		return;
	}

	dev_priv->vgpu.active = true;
	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
}

struct _balloon_info_ {
	/*
	 * There are up to 2 regions per mappable/unmappable graphic
	 * memory that might be ballooned. Here, index 0/1 is for mappable
	 * graphic memory, 2/3 for unmappable graphic memory.
	 */
	struct drm_mm_node space[4];
};

static struct _balloon_info_ bl_info;

/**
 * intel_vgt_deballoon - deballoon reserved graphics address trunks
 *
 * This function is called to deallocate the ballooned-out graphic memory, when
 * driver is unloaded or when ballooning fails.
 */
void intel_vgt_deballoon(void)
{
	int i;

	DRM_DEBUG("VGT deballoon.\n");

	for (i = 0; i < 4; i++) {
		if (bl_info.space[i].allocated)
			drm_mm_remove_node(&bl_info.space[i]);
	}

	memset(&bl_info, 0, sizeof(bl_info));
}

static int vgt_balloon_space(struct drm_mm *mm,
			     struct drm_mm_node *node,
			     unsigned long start, unsigned long end)
{
	unsigned long size = end - start;

	if (start == end)
		return -EINVAL;

	DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
		 start, end, size / 1024);

	node->start = start;
	node->size = size;

	return drm_mm_reserve_node(mm, node);
}

/**
 * intel_vgt_balloon - balloon out reserved graphics address trunks
 * @dev: drm device
 *
 * This function is called at the initialization stage, to balloon out the
 * graphic address space allocated to other vGPUs, by marking these spaces as
 * reserved. The ballooning related knowledge(starting address and size of
 * the mappable/unmappable graphic memory) is described in the vgt_if structure
 * in a reserved mmio range.
 *
 * To give an example, the drawing below depicts one typical scenario after
 * ballooning. Here the vGPU1 has 2 pieces of graphic address spaces ballooned
 * out each for the mappable and the non-mappable part. From the vGPU1 point of
 * view, the total size is the same as the physical one, with the start address
 * of its graphic space being zero. Yet there are some portions ballooned out(
 * the shadow part, which are marked as reserved by drm allocator). From the
 * host point of view, the graphic address space is partitioned by multiple
 * vGPUs in different VMs.
 *
 *                        vGPU1 view         Host view
 *             0 ------> +-----------+     +-----------+
 *               ^       |///////////|     |   vGPU3   |
 *               |       |///////////|     +-----------+
 *               |       |///////////|     |   vGPU2   |
 *               |       +-----------+     +-----------+
 *        mappable GM    | available | ==> |   vGPU1   |
 *               |       +-----------+     +-----------+
 *               |       |///////////|     |           |
 *               v       |///////////|     |   Host    |
 *               +=======+===========+     +===========+
 *               ^       |///////////|     |   vGPU3   |
 *               |       |///////////|     +-----------+
 *               |       |///////////|     |   vGPU2   |
 *               |       +-----------+     +-----------+
 *      unmappable GM    | available | ==> |   vGPU1   |
 *               |       +-----------+     +-----------+
 *               |       |///////////|     |           |
 *               |       |///////////|     |   Host    |
 *               v       |///////////|     |           |
 * total GM size ------> +-----------+     +-----------+
 *
 * Returns:
 * zero on success, non-zero if configuration invalid or ballooning failed
 */
int intel_vgt_balloon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;

	unsigned long mappable_base, mappable_size, mappable_end;
	unsigned long unmappable_base, unmappable_size, unmappable_end;
	int ret;

	mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
	mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
	unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
	unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));

	mappable_end = mappable_base + mappable_size;
	unmappable_end = unmappable_base + unmappable_size;

	DRM_INFO("VGT ballooning configuration:\n");
	DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
		 mappable_base, mappable_size / 1024);
	DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
		 unmappable_base, unmappable_size / 1024);

	if (mappable_base < ggtt_vm->start ||
	    mappable_end > dev_priv->gtt.mappable_end ||
	    unmappable_base < dev_priv->gtt.mappable_end ||
	    unmappable_end > ggtt_vm_end) {
		DRM_ERROR("Invalid ballooning configuration!\n");
		return -EINVAL;
	}

	/* Unmappable graphic memory ballooning */
	if (unmappable_base > dev_priv->gtt.mappable_end) {
		ret = vgt_balloon_space(&ggtt_vm->mm,
					&bl_info.space[2],
					dev_priv->gtt.mappable_end,
					unmappable_base);

		if (ret)
			goto err;
	}

	/*
	 * No need to partition out the last physical page,
	 * because it is reserved to the guard page.
	 */
	if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
		ret = vgt_balloon_space(&ggtt_vm->mm,
					&bl_info.space[3],
					unmappable_end,
					ggtt_vm_end - PAGE_SIZE);
		if (ret)
			goto err;
	}

	/* Mappable graphic memory ballooning */
	if (mappable_base > ggtt_vm->start) {
		ret = vgt_balloon_space(&ggtt_vm->mm,
					&bl_info.space[0],
					ggtt_vm->start, mappable_base);

		if (ret)
			goto err;
	}

	if (mappable_end < dev_priv->gtt.mappable_end) {
		ret = vgt_balloon_space(&ggtt_vm->mm,
					&bl_info.space[1],
					mappable_end,
					dev_priv->gtt.mappable_end);

		if (ret)
			goto err;
	}

	DRM_INFO("VGT balloon successfully\n");
	return 0;

err:
	DRM_ERROR("VGT balloon fail\n");
	intel_vgt_deballoon();
	return ret;
}
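The ballooning logic above reduces to reserving the complement of each assigned [base, base + size) slice within its half of the global space. A standalone toy model of the range arithmetic (hypothetical, no kernel dependencies):

	#include <stdio.h>

	/* Toy model: print the ranges that would be ballooned out around
	 * an assigned [base, end) slice of a [start, limit) region, as
	 * intel_vgt_balloon() does per mappable/unmappable half. */
	static void balloon(unsigned long start, unsigned long limit,
			    unsigned long base, unsigned long end)
	{
		if (base > start)
			printf("reserve [0x%lx, 0x%lx)\n", start, base);
		if (end < limit)
			printf("reserve [0x%lx, 0x%lx)\n", end, limit);
	}

	int main(void)
	{
		/* Example: 256 MiB aperture, this vGPU owns 64 MiB at 128 MiB. */
		balloon(0x0, 0x10000000, 0x8000000, 0xC000000);
		return 0;
	}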
drivers/gpu/drm/i915/i915_vgpu.h (new file, 91 lines)
@@ -0,0 +1,91 @@
/*
 * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_

/* The MMIO offset of the shared info between guest and host emulator */
#define VGT_PVINFO_PAGE	0x78000
#define VGT_PVINFO_SIZE	0x1000

/*
 * The following structure pages are defined in GEN MMIO space
 * for virtualization. (One page for now)
 */
#define VGT_MAGIC         0x4776544776544776	/* 'vGTvGTvG' */
#define VGT_VERSION_MAJOR 1
#define VGT_VERSION_MINOR 0

#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
#define INTEL_VGT_IF_VERSION \
	INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)

struct vgt_if {
	uint64_t magic;		/* VGT_MAGIC */
	uint16_t version_major;
	uint16_t version_minor;
	uint32_t vgt_id;	/* ID of vGT instance */
	uint32_t rsv1[12];	/* pad to offset 0x40 */
	/*
	 * Data structure to describe the balooning info of resources.
	 * Each VM can only have one portion of continuous area for now.
	 * (May support scattered resource in future)
	 * (starting from offset 0x40)
	 */
	struct {
		/* Aperture register balooning */
		struct {
			uint32_t base;
			uint32_t size;
		} mappable_gmadr;	/* aperture */
		/* GMADR register balooning */
		struct {
			uint32_t base;
			uint32_t size;
		} nonmappable_gmadr;	/* non aperture */
		/* allowed fence registers */
		uint32_t fence_num;
		uint32_t rsv2[3];
	} avail_rs;		/* available/assigned resource */
	uint32_t rsv3[0x200 - 24];	/* pad to half page */
	/*
	 * The bottom half page is for response from Gfx driver to hypervisor.
	 * Set to reserved fields temporarily by now.
	 */
	uint32_t rsv4;
	uint32_t display_ready;	/* ready for display owner switch */
	uint32_t rsv5[0x200 - 2];	/* pad to one page */
} __packed;

#define vgtif_reg(x) \
	(VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)

/* vGPU display status to be used by the host side */
#define VGT_DRV_DISPLAY_NOT_READY 0
#define VGT_DRV_DISPLAY_READY     1	/* ready for display switch */

extern void i915_check_vgpu(struct drm_device *dev);
extern int intel_vgt_balloon(struct drm_device *dev);
extern void intel_vgt_deballoon(void);

#endif /* _I915_VGPU_H_ */
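vgtif_reg() turns a field of struct vgt_if into an MMIO offset using the classic null-pointer offsetof idiom. A standalone illustration of the equivalence (hypothetical demo types, not part of the header):

	#include <stddef.h>
	#include <stdio.h>

	#define DEMO_PVINFO_PAGE 0x78000

	struct demo_if {
		unsigned long long magic;	/* bytes 0..7 */
		unsigned short version_major;	/* starts at byte 8 */
	};

	int main(void)
	{
		/* Same result as the (long)&((struct demo_if *)NULL)->x form. */
		size_t off = DEMO_PVINFO_PAGE +
			     offsetof(struct demo_if, version_major);
		printf("0x%zx\n", off);	/* prints 0x78008 */
		return 0;
	}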
@@ -2190,11 +2190,15 @@ static bool need_vtd_wa(struct drm_device *dev)
 }
 
 int
-intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
+intel_fb_align_height(struct drm_device *dev, int height,
+		      uint32_t pixel_format,
+		      uint64_t fb_format_modifier)
 {
 	int tile_height;
 
-	tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
+	tile_height = fb_format_modifier == I915_FORMAT_MOD_X_TILED ?
+		(IS_GEN2(dev) ? 16 : 8) : 1;
 
 	return ALIGN(height, tile_height);
 }
@@ -2211,8 +2215,8 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	switch (obj->tiling_mode) {
-	case I915_TILING_NONE:
+	switch (fb->modifier[0]) {
+	case DRM_FORMAT_MOD_NONE:
 		if (INTEL_INFO(dev)->gen >= 9)
 			alignment = 256 * 1024;
 		else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2222,7 +2226,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 		else
 			alignment = 64 * 1024;
 		break;
-	case I915_TILING_X:
+	case I915_FORMAT_MOD_X_TILED:
 		if (INTEL_INFO(dev)->gen >= 9)
 			alignment = 256 * 1024;
 		else {
@@ -2230,11 +2234,12 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			alignment = 0;
 		}
 		break;
-	case I915_TILING_Y:
+	case I915_FORMAT_MOD_Y_TILED:
 		WARN(1, "Y tiled bo slipped through, driver bug!\n");
 		return -EINVAL;
 	default:
-		BUG();
+		MISSING_CASE(fb->modifier[0]);
+		return -EINVAL;
 	}
 
 	/* Note that the w/a also requires 64 PTE of padding following the
@@ -2282,7 +2287,7 @@ err_interruptible:
 	return ret;
 }
 
-void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+static void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
 {
 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
@@ -2371,6 +2376,7 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_gem_object *obj = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+	struct drm_framebuffer *fb = &plane_config->fb->base;
 	u32 base = plane_config->base;
 
 	if (plane_config->size == 0)
@@ -2383,16 +2389,18 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
 
 	obj->tiling_mode = plane_config->tiling;
 	if (obj->tiling_mode == I915_TILING_X)
-		obj->stride = crtc->base.primary->fb->pitches[0];
+		obj->stride = fb->pitches[0];
 
-	mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
-	mode_cmd.width = crtc->base.primary->fb->width;
-	mode_cmd.height = crtc->base.primary->fb->height;
-	mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
+	mode_cmd.pixel_format = fb->pixel_format;
+	mode_cmd.width = fb->width;
+	mode_cmd.height = fb->height;
+	mode_cmd.pitches[0] = fb->pitches[0];
+	mode_cmd.modifier[0] = fb->modifier[0];
+	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
 
 	mutex_lock(&dev->struct_mutex);
 
-	if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
+	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
 				   &mode_cmd, obj)) {
 		DRM_DEBUG_KMS("intel fb init failed\n");
 		goto out_unref_obj;
@@ -2410,6 +2418,20 @@ out_unref_obj:
 	return false;
 }
 
+/* Update plane->state->fb to match plane->fb after driver-internal updates */
+static void
+update_state_fb(struct drm_plane *plane)
+{
+	if (plane->fb == plane->state->fb)
+		return;
+
+	if (plane->state->fb)
+		drm_framebuffer_unreference(plane->state->fb);
+	plane->state->fb = plane->fb;
+	if (plane->state->fb)
+		drm_framebuffer_reference(plane->state->fb);
+}
+
 static void
 intel_find_plane_obj(struct intel_crtc *intel_crtc,
 		     struct intel_initial_plane_config *plane_config)
@@ -2420,14 +2442,20 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
 	struct intel_crtc *i;
 	struct drm_i915_gem_object *obj;
 
-	if (!intel_crtc->base.primary->fb)
+	if (!plane_config->fb)
 		return;
 
-	if (intel_alloc_plane_obj(intel_crtc, plane_config))
-		return;
+	if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
+		struct drm_plane *primary = intel_crtc->base.primary;
+
+		primary->fb = &plane_config->fb->base;
+		primary->state->crtc = &intel_crtc->base;
+		update_state_fb(primary);
+
+		return;
+	}
 
-	kfree(intel_crtc->base.primary->fb);
-	intel_crtc->base.primary->fb = NULL;
+	kfree(plane_config->fb);
 
 	/*
 	 * Failed to alloc the obj, check to see if we should share
@@ -2447,15 +2475,20 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
 			continue;
 
 		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+			struct drm_plane *primary = intel_crtc->base.primary;
+
 			if (obj->tiling_mode != I915_TILING_NONE)
 				dev_priv->preserve_bios_swizzle = true;
 
 			drm_framebuffer_reference(c->primary->fb);
-			intel_crtc->base.primary->fb = c->primary->fb;
+			primary->fb = c->primary->fb;
+			primary->state->crtc = &intel_crtc->base;
+			update_state_fb(intel_crtc->base.primary);
 			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 			break;
 		}
 	}
-
 }
 
 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2747,11 +2780,11 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
 	 * The stride is either expressed as a multiple of 64 bytes chunks for
 	 * linear buffers or in number of tiles for tiled buffers.
 	 */
-	switch (obj->tiling_mode) {
-	case I915_TILING_NONE:
+	switch (fb->modifier[0]) {
+	case DRM_FORMAT_MOD_NONE:
 		stride = fb->pitches[0] >> 6;
 		break;
-	case I915_TILING_X:
+	case I915_FORMAT_MOD_X_TILED:
 		plane_ctl |= PLANE_CTL_TILED_X;
 		stride = fb->pitches[0] >> 9;
 		break;
@@ -4251,11 +4284,10 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
 
 	intel_crtc_wait_for_pending_flips(crtc);
 
-	if (dev_priv->fbc.plane == plane)
+	if (dev_priv->fbc.crtc == intel_crtc)
 		intel_fbc_disable(dev);
 
 	hsw_disable_ips(intel_crtc);
@@ -6587,6 +6619,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 
+	val = I915_READ(DSPCNTR(plane));
+	if (!(val & DISPLAY_PLANE_ENABLE))
+		return;
+
 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
 	if (!intel_fb) {
 		DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -6595,11 +6631,12 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 
 	fb = &intel_fb->base;
 
-	val = I915_READ(DSPCNTR(plane));
-
-	if (INTEL_INFO(dev)->gen >= 4)
-		if (val & DISPPLANE_TILED)
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (val & DISPPLANE_TILED) {
 			plane_config->tiling = I915_TILING_X;
+			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+		}
+	}
 
 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
 	fourcc = i9xx_format_to_fourcc(pixel_format);
@@ -6625,7 +6662,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->pitches[0] = val & 0xffffffc0;
 
 	aligned_height = intel_fb_align_height(dev, fb->height,
-					       plane_config->tiling);
+					       fb->pixel_format,
+					       fb->modifier[0]);
 
 	plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
 
@@ -6634,7 +6672,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 		      fb->bits_per_pixel, base, fb->pitches[0],
 		      plane_config->size);
 
-	crtc->base.primary->fb = fb;
+	plane_config->fb = intel_fb;
 }
 
 static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7628,8 +7666,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb = &intel_fb->base;
 
 	val = I915_READ(PLANE_CTL(pipe, 0));
-	if (val & PLANE_CTL_TILED_MASK)
+	if (!(val & PLANE_CTL_ENABLE))
+		goto error;
+
+	if (val & PLANE_CTL_TILED_MASK) {
 		plane_config->tiling = I915_TILING_X;
+		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+	}
 
 	pixel_format = val & PLANE_CTL_FORMAT_MASK;
 	fourcc = skl_format_to_fourcc(pixel_format,
@@ -7662,7 +7705,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
 
 	aligned_height = intel_fb_align_height(dev, fb->height,
-					       plane_config->tiling);
+					       fb->pixel_format,
+					       fb->modifier[0]);
 
 	plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
 
@@ -7671,7 +7715,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 		      fb->bits_per_pixel, base, fb->pitches[0],
 		      plane_config->size);
 
-	crtc->base.primary->fb = fb;
+	plane_config->fb = intel_fb;
 	return;
 
 error:
@@ -7715,6 +7759,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 
+	val = I915_READ(DSPCNTR(pipe));
+	if (!(val & DISPLAY_PLANE_ENABLE))
+		return;
+
 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
 	if (!intel_fb) {
 		DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -7723,11 +7771,12 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 
 	fb = &intel_fb->base;
 
-	val = I915_READ(DSPCNTR(pipe));
-
-	if (INTEL_INFO(dev)->gen >= 4)
-		if (val & DISPPLANE_TILED)
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (val & DISPPLANE_TILED) {
 			plane_config->tiling = I915_TILING_X;
+			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
+		}
+	}
 
 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
 	fourcc = i9xx_format_to_fourcc(pixel_format);
@@ -7753,7 +7802,8 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->pitches[0] = val & 0xffffffc0;
 
 	aligned_height = intel_fb_align_height(dev, fb->height,
-					       plane_config->tiling);
+					       fb->pixel_format,
+					       fb->modifier[0]);
 
 	plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
 
@@ -7762,7 +7812,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 		      fb->bits_per_pixel, base, fb->pitches[0],
 		      plane_config->size);
 
-	crtc->base.primary->fb = fb;
+	plane_config->fb = intel_fb;
 }
 
 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
@@ -9055,9 +9105,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
 
 	mutex_lock(&dev->struct_mutex);
-	intel_unpin_fb_obj(work->old_fb_obj);
+	intel_unpin_fb_obj(intel_fb_obj(work->old_fb));
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
-	drm_gem_object_unreference(&work->old_fb_obj->base);
+	drm_framebuffer_unreference(work->old_fb);
 
 	intel_fbc_update(dev);
 
@@ -9582,69 +9632,6 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 	return 0;
 }
 
-static int intel_gen9_queue_flip(struct drm_device *dev,
-				 struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
-				 uint32_t flags)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t plane = 0, stride;
-	int ret;
-
-	switch(intel_crtc->pipe) {
-	case PIPE_A:
-		plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
-		break;
-	case PIPE_B:
-		plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
-		break;
-	case PIPE_C:
-		plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
-		break;
-	default:
-		WARN_ONCE(1, "unknown plane in flip command\n");
-		return -ENODEV;
-	}
-
-	switch (obj->tiling_mode) {
-	case I915_TILING_NONE:
-		stride = fb->pitches[0] >> 6;
-		break;
-	case I915_TILING_X:
-		stride = fb->pitches[0] >> 9;
-		break;
-	default:
-		WARN_ONCE(1, "unknown tiling in flip command\n");
-		return -ENODEV;
-	}
-
-	ret = intel_ring_begin(ring, 10);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, DERRMR);
-	intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-				DERRMR_PIPEB_PRI_FLIP_DONE |
-				DERRMR_PIPEC_PRI_FLIP_DONE));
-	intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
-			      MI_SRM_LRM_GLOBAL_GTT);
-	intel_ring_emit(ring, DERRMR);
-	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
-	intel_ring_emit(ring, 0);
-
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
-	intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
-	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
-
-	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
-
-	return 0;
-}
-
 static int intel_default_queue_flip(struct drm_device *dev,
 				    struct drm_crtc *crtc,
 				    struct drm_framebuffer *fb,
@@ -9760,7 +9747,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

	work->event = event;
	work->crtc = crtc;
-	work->old_fb_obj = intel_fb_obj(old_fb);
+	work->old_fb = old_fb;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);

@@ -9796,10 +9783,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
		goto cleanup;

	/* Reference the objects for the scheduled work. */
-	drm_gem_object_reference(&work->old_fb_obj->base);
+	drm_framebuffer_reference(work->old_fb);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;
+	update_state_fb(crtc->primary);

	work->pending_flip_obj = obj;

@@ -9811,7 +9799,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

	if (IS_VALLEYVIEW(dev)) {
		ring = &dev_priv->ring[BCS];
-		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
+		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			ring = NULL;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {

@@ -9852,7 +9840,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
	work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
	work->enable_stall_check = true;

-	i915_gem_track_fb(work->old_fb_obj, obj,
+	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
			  INTEL_FRONTBUFFER_PRIMARY(pipe));

	intel_fbc_disable(dev);

@@ -9868,7 +9856,8 @@ cleanup_unpin:
cleanup_pending:
	atomic_dec(&intel_crtc->unpin_work_count);
	crtc->primary->fb = old_fb;
-	drm_gem_object_unreference(&work->old_fb_obj->base);
+	update_state_fb(crtc->primary);
+	drm_framebuffer_unreference(work->old_fb);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
@@ -11897,7 +11886,7 @@ intel_check_primary_plane(struct drm_plane *plane,
	 */
	if (intel_crtc->primary_enabled &&
	    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
-	    dev_priv->fbc.plane == intel_crtc->plane &&
+	    dev_priv->fbc.crtc == intel_crtc &&
	    state->base.rotation != BIT(DRM_ROTATE_0)) {
		intel_crtc->atomic.disable_fbc = true;
	}

@@ -12069,8 +12058,8 @@ void intel_plane_destroy(struct drm_plane *plane)
}

const struct drm_plane_funcs intel_plane_funcs = {
-	.update_plane = drm_plane_helper_update,
-	.disable_plane = drm_plane_helper_disable,
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,

@@ -12185,13 +12174,10 @@ intel_check_cursor_plane(struct drm_plane *plane,
	if (fb == crtc->cursor->fb)
		return 0;

-	/* we only need to pin inside GTT if cursor is non-phy */
-	mutex_lock(&dev->struct_mutex);
-	if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
+	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		ret = -EINVAL;
	}
-	mutex_unlock(&dev->struct_mutex);

finish:
	if (intel_crtc->active) {
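The plane_funcs change above is the atomic-helper cut-over: the transitional drm_plane_helper entry points give way to the drm core atomic helpers. For illustration, a minimal sketch (not i915's actual table, which also carries its own destroy and atomic property hooks) of how a driver routes the legacy plane ioctls through the atomic helpers:

#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>

/* Sketch: the legacy update/disable ioctls are serviced by the drm
 * core atomic helpers, which build and commit an atomic state under
 * the hood; only destroy stays driver (here: core cleanup). */
static const struct drm_plane_funcs example_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	.set_property	= drm_atomic_helper_plane_set_property,
};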
@@ -12672,7 +12658,24 @@ static int intel_framebuffer_init(struct drm_device *dev,

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

-	if (obj->tiling_mode == I915_TILING_Y) {
+	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+		/* Enforce that fb modifier and tiling mode match, but only for
+		 * X-tiled. This is needed for FBC. */
+		if (!!(obj->tiling_mode == I915_TILING_X) !=
+		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
+			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
+			return -EINVAL;
+		}
+	} else {
+		if (obj->tiling_mode == I915_TILING_X)
+			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+		else if (obj->tiling_mode == I915_TILING_Y) {
+			DRM_DEBUG("No Y tiling for legacy addfb\n");
+			return -EINVAL;
+		}
+	}
+
+	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED) {
		DRM_DEBUG("hardware does not support tiling Y\n");
		return -EINVAL;
	}
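The new intel_framebuffer_init() logic above is a small decision table: when userspace sets DRM_MODE_FB_MODIFIERS, the modifier must agree with the BO's tiling (FBC depends on that agreement); without the flag, the kernel derives the modifier from the tiling and rejects Y-tiled BOs through legacy addfb. A standalone sketch of that table, with illustrative stand-ins for the kernel constants:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins; real values live in i915_drv.h / drm uapi. */
enum tiling { TILING_NONE, TILING_X, TILING_Y };
#define FMT_MOD_NONE		0ULL
#define FMT_MOD_X_TILED		1ULL
#define FB_MODIFIERS_FLAG	(1 << 1)

/* Returns 0 on success, -1 on the same rejections the hunk above makes. */
static int check_fb_modifier(unsigned flags, uint64_t *modifier, enum tiling t)
{
	if (flags & FB_MODIFIERS_FLAG) {
		/* Userspace spoke modifiers: X-tiling and the X modifier
		 * must agree, for FBC's sake. */
		if ((t == TILING_X) != (*modifier == FMT_MOD_X_TILED))
			return -1;
	} else {
		/* Legacy addfb: derive the modifier from the object. */
		if (t == TILING_X)
			*modifier = FMT_MOD_X_TILED;
		else if (t == TILING_Y)
			return -1; /* no Y tiling through legacy addfb */
	}
	return 0;
}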
@@ -12686,12 +12689,12 @@ static int intel_framebuffer_init(struct drm_device *dev,
	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
		pitch_limit = 32*1024;
	} else if (INTEL_INFO(dev)->gen >= 4) {
-		if (obj->tiling_mode)
+		if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)
			pitch_limit = 16*1024;
		else
			pitch_limit = 32*1024;
	} else if (INTEL_INFO(dev)->gen >= 3) {
-		if (obj->tiling_mode)
+		if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)
			pitch_limit = 8*1024;
		else
			pitch_limit = 16*1024;

@@ -12701,12 +12704,13 @@ static int intel_framebuffer_init(struct drm_device *dev,

	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
-			  obj->tiling_mode ? "tiled" : "linear",
+			  mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED ?
+			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

-	if (obj->tiling_mode != I915_TILING_NONE &&
+	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);

@@ -12761,7 +12765,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
-					       obj->tiling_mode);
+					       mode_cmd->pixel_format,
+					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;
@@ -12923,9 +12928,6 @@ static void intel_init_display(struct drm_device *dev)
			valleyview_modeset_global_resources;
	}

-	/* Default just returns -ENODEV to indicate unsupported */
-	dev_priv->display.queue_flip = intel_default_queue_flip;
-
	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;

@@ -12948,8 +12950,10 @@ static void intel_init_display(struct drm_device *dev)
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
-		dev_priv->display.queue_flip = intel_gen9_queue_flip;
-		break;
+		/* Drop through - unsupported since execlist only. */
	default:
+		/* Default just returns -ENODEV to indicate unsupported */
+		dev_priv->display.queue_flip = intel_default_queue_flip;
	}

	intel_panel_init_backlight_funcs(dev);

@@ -13165,6 +13169,8 @@ void intel_modeset_init(struct drm_device *dev)
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

+	dev->mode_config.allow_fb_modifiers = true;
+
	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

@@ -13702,6 +13708,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
+			update_state_fb(c->primary);
		}
	}
	mutex_unlock(&dev->struct_mutex);
@@ -240,7 +240,7 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
	return v;
}

-void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)

@@ -3803,7 +3803,7 @@ go_again:
 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
 * 4. Check link status on receipt of hot-plug interrupt
 */
-void
+static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -258,6 +258,7 @@ struct intel_plane_state {
};

struct intel_initial_plane_config {
+	struct intel_framebuffer *fb;
	unsigned int tiling;
	int size;
	u32 base;

@@ -710,7 +711,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
struct intel_unpin_work {
	struct work_struct work;
	struct drm_crtc *crtc;
-	struct drm_i915_gem_object *old_fb_obj;
+	struct drm_framebuffer *old_fb;
	struct drm_i915_gem_object *pending_flip_obj;
	struct drm_pending_vblank_event *event;
	atomic_t pending;

@@ -878,7 +879,8 @@ void intel_frontbuffer_flip(struct drm_device *dev,
}

int intel_fb_align_height(struct drm_device *dev, int height,
-			  unsigned int tiling);
+			  uint32_t pixel_format,
+			  uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);


@@ -932,7 +934,6 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			       struct drm_framebuffer *fb,
			       struct intel_engine_cs *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,

@@ -1017,7 +1018,6 @@ void intel_dp_complete_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-void intel_dp_check_link_status(struct intel_dp *intel_dp);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config);

@@ -1036,13 +1036,6 @@ int intel_dp_max_link_bw(struct intel_dp *intel_dp);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
-void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
-int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-		       struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-		       unsigned int crtc_w, unsigned int crtc_h,
-		       uint32_t src_x, uint32_t src_y,
-		       uint32_t src_w, uint32_t src_h);
-int intel_disable_plane(struct drm_plane *plane);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp);
void intel_edp_drrs_disable(struct intel_dp *intel_dp);

@@ -1231,9 +1224,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
			       enum plane plane);
-int intel_plane_set_property(struct drm_plane *plane,
-			     struct drm_property *prop,
-			     uint64_t val);
int intel_plane_restore(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
@@ -854,7 +854,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)


	/* recovery disables */
-	I915_WRITE(MIPI_EOT_DISABLE(port), val);
+	I915_WRITE(MIPI_EOT_DISABLE(port), tmp);

	/* in terms of low power clock */
	I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
@@ -1,39 +0,0 @@
-/*
- * Copyright © 2013 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Jani Nikula <jani.nikula@intel.com>
- */
-
-#ifndef _INTEL_DSI_DSI_H
-#define _INTEL_DSI_DSI_H
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <video/mipi_display.h>
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "intel_dsi.h"
-
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
-			enum port port);
-
-#endif /* _INTEL_DSI_DSI_H */
@@ -78,7 +78,8 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)

	dev_priv->fbc.enabled = true;

-	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
+	/* Note: fbc.threshold == 1 for i8xx */
+	cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

@@ -368,7 +369,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
	if (work->crtc->primary->fb == work->fb) {
		dev_priv->display.enable_fbc(work->crtc);

-		dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+		dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
		dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
		dev_priv->fbc.y = work->crtc->y;
	}

@@ -459,7 +460,7 @@ void intel_fbc_disable(struct drm_device *dev)
		return;

	dev_priv->display.disable_fbc(dev);
-	dev_priv->fbc.plane = -1;
+	dev_priv->fbc.crtc = NULL;
}

static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,

@@ -501,15 +502,23 @@ void intel_fbc_update(struct drm_device *dev)
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

-	if (!HAS_FBC(dev)) {
-		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
+	if (!HAS_FBC(dev))
		return;

+	/* disable framebuffer compression in vGPU */
+	if (intel_vgpu_active(dev))
+		i915.enable_fbc = 0;
+
+	if (i915.enable_fbc < 0) {
+		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+			DRM_DEBUG_KMS("disabled per chip default\n");
+		goto out_disable;
+	}
+
-	if (!i915.powersave) {
+	if (!i915.enable_fbc || !i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
-		return;
+		goto out_disable;
	}

/*

@@ -544,16 +553,6 @@ void intel_fbc_update(struct drm_device *dev)
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config->base.adjusted_mode;

-	if (i915.enable_fbc < 0) {
-		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
-			DRM_DEBUG_KMS("disabled per chip default\n");
-		goto out_disable;
-	}
-	if (!i915.enable_fbc) {
-		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
-			DRM_DEBUG_KMS("fbc disabled per module param\n");
-		goto out_disable;
-	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))

@@ -617,7 +616,7 @@ void intel_fbc_update(struct drm_device *dev)
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
-	if (dev_priv->fbc.plane == intel_crtc->plane &&
+	if (dev_priv->fbc.crtc == intel_crtc &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

@@ -673,6 +672,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_FBC(dev_priv)) {
		dev_priv->fbc.enabled = false;
+		dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
		return;
	}

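Worth noting in the reshuffled intel_fbc_update() above: every bail-out funnels through set_no_fbc_reason(), which appears to report whether the recorded reason actually changed, so each DRM_DEBUG_KMS fires once per transition instead of on every update call. A sketch of that pattern (inferred from the call sites, not copied from the driver):

#include <stdbool.h>

enum no_fbc_reason { FBC_OK, FBC_MODULE_PARAM, FBC_CHIP_DEFAULT };

/* Record why FBC is off; return true only when the reason changed,
 * so the caller can ratelimit its debug message to transitions. */
static bool set_no_fbc_reason(enum no_fbc_reason *state, enum no_fbc_reason r)
{
	if (*state == r)
		return false;
	*state = r;
	return true;
}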
@@ -594,7 +594,8 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,

		cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
		cur_size = intel_fb_align_height(dev, cur_size,
-						 plane_config->tiling);
+						 fb->base.pixel_format,
+						 fb->base.modifier[0]);
		cur_size *= fb->base.pitches[0];
		DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
			      pipe_name(intel_crtc->pipe),
@@ -254,8 +254,10 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
	return lrca >> 12;
}

-static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
+static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
+					 struct drm_i915_gem_object *ctx_obj)
{
+	struct drm_device *dev = ring->dev;
	uint64_t desc;
	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);

@@ -272,6 +274,13 @@ static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
	 * signalling between Command Streamers */
	/* desc |= GEN8_CTX_FORCE_RESTORE; */

+	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
+	if (IS_GEN9(dev) &&
+	    INTEL_REVID(dev) <= SKL_REVID_B0 &&
+	    (ring->id == BCS || ring->id == VCS ||
+	     ring->id == VECS || ring->id == VCS2))
+		desc |= GEN8_CTX_FORCE_RESTORE;
+
	return desc;
}

@@ -286,13 +295,13 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,

	/* XXX: You must always write both descriptors in the order below. */
	if (ctx_obj1)
-		temp = execlists_ctx_descriptor(ctx_obj1);
+		temp = execlists_ctx_descriptor(ring, ctx_obj1);
	else
		temp = 0;
	desc[1] = (u32)(temp >> 32);
	desc[0] = (u32)temp;

-	temp = execlists_ctx_descriptor(ctx_obj0);
+	temp = execlists_ctx_descriptor(ring, ctx_obj0);
	desc[3] = (u32)(temp >> 32);
	desc[2] = (u32)temp;

@@ -776,7 +785,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
	return 0;
}

-/**
+/*
 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
 * @ringbuf: Logical Ringbuffer to advance.
 *

@@ -785,9 +794,10 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
-void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
-					   struct intel_context *ctx,
-					   struct drm_i915_gem_request *request)
+static void
+intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+				      struct intel_context *ctx,
+				      struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *ring = ringbuf->ring;

@@ -1140,6 +1150,17 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
	return init_workarounds_ring(ring);
}

+static int gen9_init_render_ring(struct intel_engine_cs *ring)
+{
+	int ret;
+
+	ret = gen8_init_common_ring(ring);
+	if (ret)
+		return ret;
+
+	return init_workarounds_ring(ring);
+}
+
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
			      struct intel_context *ctx,
			      u64 offset, unsigned flags)

@@ -1316,6 +1337,39 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
	return 0;
}

+static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
+					      struct intel_context *ctx)
+{
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct render_state so;
+	struct drm_i915_file_private *file_priv = ctx->file_priv;
+	struct drm_file *file = file_priv ? file_priv->file : NULL;
+	int ret;
+
+	ret = i915_gem_render_state_prepare(ring, &so);
+	if (ret)
+		return ret;
+
+	if (so.rodata == NULL)
+		return 0;
+
+	ret = ring->emit_bb_start(ringbuf,
+				  ctx,
+				  so.ggtt_offset,
+				  I915_DISPATCH_SECURE);
+	if (ret)
+		goto out;
+
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+
+	ret = __i915_add_request(ring, file, so.obj);
+	/* intel_logical_ring_add_request moves object to inactive if it
+	 * fails */
+out:
+	i915_gem_render_state_fini(&so);
+	return ret;
+}
+
static int gen8_init_rcs_context(struct intel_engine_cs *ring,
		       struct intel_context *ctx)
{

@@ -1399,7 +1453,10 @@ static int logical_render_ring_init(struct drm_device *dev)
	if (HAS_L3_DPF(dev))
		ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

-	ring->init_hw = gen8_init_render_ring;
+	if (INTEL_INFO(dev)->gen >= 9)
+		ring->init_hw = gen9_init_render_ring;
+	else
+		ring->init_hw = gen8_init_render_ring;
	ring->init_context = gen8_init_rcs_context;
	ring->cleanup = intel_fini_pipe_control;
	ring->get_seqno = gen8_get_seqno;

@@ -1581,39 +1638,6 @@ cleanup_render_ring:
	return ret;
}

-int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
-				       struct intel_context *ctx)
-{
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
-	struct render_state so;
-	struct drm_i915_file_private *file_priv = ctx->file_priv;
-	struct drm_file *file = file_priv ? file_priv->file : NULL;
-	int ret;
-
-	ret = i915_gem_render_state_prepare(ring, &so);
-	if (ret)
-		return ret;
-
-	if (so.rodata == NULL)
-		return 0;
-
-	ret = ring->emit_bb_start(ringbuf,
-				  ctx,
-				  so.ggtt_offset,
-				  I915_DISPATCH_SECURE);
-	if (ret)
-		goto out;
-
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
-
-	ret = __i915_add_request(ring, file, so.obj);
-	/* intel_logical_ring_add_request moves object to inactive if it
-	 * fails */
-out:
-	i915_gem_render_state_fini(&so);
-	return ret;
-}
-
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)

@@ -1659,7 +1683,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
-		_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
+		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+				   CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
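The execlists change above threads the engine through execlists_ctx_descriptor() so the SKL force-restore workaround can be applied per ring. Conceptually, the descriptor is a 64-bit value built around the context's GGTT address, optionally tagged with the force-restore bit, then split into two dwords for the ELSP write. A minimal sketch (the bit position is illustrative, not the hardware layout):

#include <stdint.h>

#define CTX_FORCE_RESTORE	(1ULL << 2)	/* illustrative bit position */

/* Build a descriptor from the context address (LRCA), tag it for the
 * WaEnableForceRestoreInCtxtDescForVCS case, and split it into the two
 * 32-bit halves that get written to the ELSP port. */
static void make_elsp_dwords(uint64_t lrca, int force_restore, uint32_t out[2])
{
	uint64_t desc = lrca;

	if (force_restore)
		desc |= CTX_FORCE_RESTORE;

	out[0] = (uint32_t)desc;		/* low dword */
	out[1] = (uint32_t)(desc >> 32);	/* high dword */
}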
@@ -30,6 +30,8 @@
#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring)	((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
+#define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
+#define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
#define RING_CONTEXT_STATUS_BUF(ring)	((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)

@@ -40,10 +42,6 @@ int intel_logical_rings_init(struct drm_device *dev);

int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
				  struct intel_context *ctx);
-void intel_logical_ring_advance_and_submit(
-	struct intel_ringbuffer *ringbuf,
-	struct intel_context *ctx,
-	struct drm_i915_gem_request *request);
/**
 * intel_logical_ring_advance() - advance the ringbuffer tail
 * @ringbuf: Ringbuffer to advance.

@@ -70,8 +68,6 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
			     int num_dwords);

/* Logical Ring Contexts */
-int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
-				       struct intel_context *ctx);
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring);
@@ -56,24 +56,42 @@ static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

-	/*
-	 * WaDisableSDEUnitClockGating:skl
-	 * This seems to be a pre-production w/a.
-	 */
-	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
-		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+	/* WaEnableLbsSlaRetryTimerDecrement:skl */
+	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
+		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
}

-	/*
-	 * WaDisableDgMirrorFixInHalfSliceChicken5:skl
-	 * This is a pre-production w/a.
-	 */
-	I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
-		   I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
-		   ~GEN9_DG_MIRROR_FIX_ENABLE);
+static void skl_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;

-	/* Wa4x4STCOptimizationDisable:skl */
-	I915_WRITE(CACHE_MODE_1,
-		   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
+	gen9_init_clock_gating(dev);
+
+	if (INTEL_REVID(dev) == SKL_REVID_A0) {
+		/*
+		 * WaDisableSDEUnitClockGating:skl
+		 * WaSetGAPSunitClckGateDisable:skl
+		 */
+		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+			   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
+			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+	}
+
+	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+		/* WaDisableHDCInvalidation:skl */
+		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+			   BDW_DISABLE_HDC_INVALIDATION);
+
+		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
+		I915_WRITE(FF_SLICE_CS_CHICKEN2,
+			   I915_READ(FF_SLICE_CS_CHICKEN2) |
+			   GEN9_TSG_BARRIER_ACK_DISABLE);
+	}
+
+	if (INTEL_REVID(dev) <= SKL_REVID_E0)
+		/* WaDisableLSQCROPERFforOCL:skl */
+		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+			   GEN8_LQSC_RO_PERF_DIS);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)

@@ -1711,6 +1729,8 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
+		 * WaWmMemoryReadLatency:skl
+		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit.

@@ -3750,7 +3770,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-void gen6_set_rps(struct drm_device *dev, u8 val)
+static void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

@@ -3786,6 +3806,27 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
	trace_intel_gpu_freq_change(val * 50);
}

+static void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
+
+	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+		      "Odd GPU freq value\n"))
+		val &= ~1;
+
+	if (val != dev_priv->rps.cur_freq)
+		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+
+	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
+	dev_priv->rps.cur_freq = val;
+	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+}
+
/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
 *
 * * If Gfx is Idle, then

@@ -3850,38 +3891,20 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)

void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
-	struct drm_device *dev = dev_priv->dev;
-
	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
-		if (IS_VALLEYVIEW(dev))
-			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-		else
-			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
+		intel_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

-void valleyview_set_rps(struct drm_device *dev, u8 val)
+void intel_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
-	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
-
-	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
-		      "Odd GPU freq value\n"))
-		val &= ~1;
-
-	if (val != dev_priv->rps.cur_freq)
-		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-
-	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
-	dev_priv->rps.cur_freq = val;
-	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+	if (IS_VALLEYVIEW(dev))
+		valleyview_set_rps(dev, val);
+	else
+		gen6_set_rps(dev, val);
}

static void gen9_disable_rps(struct drm_device *dev)

@@ -5633,6 +5656,10 @@ void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

+	/* Powersaving is controlled by the host when inside a VM */
+	if (intel_vgpu_active(dev))
+		return;
+
	if (IS_IRONLAKE_M(dev)) {
		mutex_lock(&dev->struct_mutex);
		ironlake_enable_drps(dev);

@@ -6396,7 +6423,8 @@ void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

-	dev_priv->display.init_clock_gating(dev);
+	if (dev_priv->display.init_clock_gating)
+		dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)

@@ -6422,7 +6450,7 @@ void intel_init_pm(struct drm_device *dev)
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);

-		dev_priv->display.init_clock_gating = gen9_init_clock_gating;
+		dev_priv->display.init_clock_gating = skl_init_clock_gating;
		dev_priv->display.update_wm = skl_update_wm;
		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
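One detail worth calling out in the RPS rework above: Cherryview only accepts even frequency codes, hence the WARN_ONCE followed by `val &= ~1` in valleyview_set_rps(). A tiny standalone illustration of that rounding rule:

#include <stdint.h>

/* Cherryview frequency requests must be even; an odd request is
 * rounded down to its even neighbour by clearing bit 0. */
static uint8_t chv_sanitize_freq(uint8_t val)
{
	if (val & 1)
		val &= ~1;
	return val;
}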
@@ -502,6 +502,68 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
	I915_WRITE(HWS_PGA, addr);
}

+static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 mmio = 0;
+
+	/* The ring status page addresses are no longer next to the rest of
+	 * the ring registers as of gen7.
+	 */
+	if (IS_GEN7(dev)) {
+		switch (ring->id) {
+		case RCS:
+			mmio = RENDER_HWS_PGA_GEN7;
+			break;
+		case BCS:
+			mmio = BLT_HWS_PGA_GEN7;
+			break;
+		/*
+		 * VCS2 actually doesn't exist on Gen7. Only shut up
+		 * gcc switch check warning
+		 */
+		case VCS2:
+		case VCS:
+			mmio = BSD_HWS_PGA_GEN7;
+			break;
+		case VECS:
+			mmio = VEBOX_HWS_PGA_GEN7;
+			break;
+		}
+	} else if (IS_GEN6(ring->dev)) {
+		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+	} else {
+		/* XXX: gen8 returns to sanity */
+		mmio = RING_HWS_PGA(ring->mmio_base);
+	}
+
+	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+	POSTING_READ(mmio);
+
+	/*
+	 * Flush the TLB for this page
+	 *
+	 * FIXME: These two bits have disappeared on gen8, so a question
+	 * arises: do we still need this and if so how should we go about
+	 * invalidating the TLB?
+	 */
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+
+		/* ring should be idle before issuing a sync flush*/
+		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
+}
+
static bool stop_ring(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);

@@ -788,12 +850,14 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
	 * workaround for for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
-	/* WaForceEnableNonCoherent:bdw */
-	/* WaHdcDisableFetchWhenMasked:bdw */
-	/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  /* WaForceEnableNonCoherent:bdw */
			  HDC_FORCE_NON_COHERENT |
+			  /* WaForceContextSaveRestoreNonCoherent:bdw */
+			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+			  /* WaHdcDisableFetchWhenMasked:bdw */
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:

@@ -870,6 +934,78 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

+	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+	    INTEL_REVID(dev) == SKL_REVID_D0)
+		/* WaBarrierPerformanceFixDisable:skl */
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FENCE_DEST_SLM_DISABLE |
+				  HDC_BARRIER_PERFORMANCE_DISABLE);
+
+	return 0;
+}
+
+static int gen9_init_workarounds(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* WaDisablePartialInstShootdown:skl */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+	/* Syncing dependencies between camera and graphics */
+	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+
+	if (INTEL_REVID(dev) == SKL_REVID_A0 ||
+	    INTEL_REVID(dev) == SKL_REVID_B0) {
+		/* WaDisableDgMirrorFixInHalfSliceChicken5:skl */
+		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+				  GEN9_DG_MIRROR_FIX_ENABLE);
+	}
+
+	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) {
+		/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl */
+		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
+				  GEN9_RHWO_OPTIMIZATION_DISABLE);
+		WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
+				  DISABLE_PIXEL_MASK_CAMMING);
+	}
+
+	if (INTEL_REVID(dev) >= SKL_REVID_C0) {
+		/* WaEnableYV12BugFixInHalfSliceChicken7:skl */
+		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+				  GEN9_ENABLE_YV12_BUGFIX);
+	}
+
+	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+		/*
+		 *Use Force Non-Coherent whenever executing a 3D context. This
+		 * is a workaround for a possible hang in the unlikely event
+		 * a TLB invalidation occurs during a PSD flush.
+		 */
+		/* WaForceEnableNonCoherent:skl */
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FORCE_NON_COHERENT);
+	}
+
+	/* Wa4x4STCOptimizationDisable:skl */
+	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+	/* WaDisablePartialResolveInVc:skl */
+	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
+
+	/* WaCcsTlbPrefetchDisable:skl */
+	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+			  GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+	return 0;
+}
+
+static int skl_init_workarounds(struct intel_engine_cs *ring)
+{
+	gen9_init_workarounds(ring);
+
	return 0;
}

@@ -888,6 +1024,11 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
	if (IS_CHERRYVIEW(dev))
		return chv_init_workarounds(ring);

+	if (IS_SKYLAKE(dev))
+		return skl_init_workarounds(ring);
+	else if (IS_GEN9(dev))
+		return gen9_init_workarounds(ring);
+
	return 0;
}

@@ -1386,68 +1527,6 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

-void intel_ring_setup_status_page(struct intel_engine_cs *ring)
-{
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	u32 mmio = 0;
-
-	/* The ring status page addresses are no longer next to the rest of
-	 * the ring registers as of gen7.
-	 */
-	if (IS_GEN7(dev)) {
-		switch (ring->id) {
-		case RCS:
-			mmio = RENDER_HWS_PGA_GEN7;
-			break;
-		case BCS:
-			mmio = BLT_HWS_PGA_GEN7;
-			break;
-		/*
-		 * VCS2 actually doesn't exist on Gen7. Only shut up
-		 * gcc switch check warning
-		 */
-		case VCS2:
-		case VCS:
-			mmio = BSD_HWS_PGA_GEN7;
-			break;
-		case VECS:
-			mmio = VEBOX_HWS_PGA_GEN7;
-			break;
-		}
-	} else if (IS_GEN6(ring->dev)) {
-		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
-	} else {
-		/* XXX: gen8 returns to sanity */
-		mmio = RING_HWS_PGA(ring->mmio_base);
-	}
-
-	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
-	POSTING_READ(mmio);
-
-	/*
-	 * Flush the TLB for this page
-	 *
-	 * FIXME: These two bits have disappeared on gen8, so a question
-	 * arises: do we still need this and if so how should we go about
-	 * invalidating the TLB?
-	 */
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
-		u32 reg = RING_INSTPM(ring->mmio_base);
-
-		/* ring should be idle before issuing a sync flush*/
-		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-
-		I915_WRITE(reg,
-			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
-					      INSTPM_SYNC_FLUSH));
-		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
-			     1000))
-			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
-				  ring->name);
-	}
-}
-
static int
bsd_ring_flush(struct intel_engine_cs *ring,
	       u32 invalidate_domains,

@@ -2612,19 +2691,13 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}

/**
- * Initialize the second BSD ring for Broadwell GT3.
- * It is noted that this only exists on Broadwell GT3.
+ * Initialize the second BSD ring (eg. Broadwell GT3, Skylake GT3)
 */
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

-	if ((INTEL_INFO(dev)->gen != 8)) {
-		DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
-		return -EINVAL;
-	}
-
	ring->name = "bsd2 ring";
	ring->id = VCS2;
|
||||
int intel_init_vebox_ring_buffer(struct drm_device *dev);
|
||||
|
||||
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
|
||||
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
|
||||
|
||||
int init_workarounds_ring(struct intel_engine_cs *ring);
|
||||
|
||||
|
@@ -230,6 +230,136 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
	}
}

+#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
+	BIT(POWER_DOMAIN_PIPE_B) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
+	BIT(POWER_DOMAIN_PIPE_C) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
+	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
+	BIT(POWER_DOMAIN_AUX_B) |			\
+	BIT(POWER_DOMAIN_AUX_C) |			\
+	BIT(POWER_DOMAIN_AUX_D) |			\
+	BIT(POWER_DOMAIN_AUDIO) |			\
+	BIT(POWER_DOMAIN_VGA) |				\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
+	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
+	BIT(POWER_DOMAIN_PLLS) |			\
+	BIT(POWER_DOMAIN_PIPE_A) |			\
+	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
+	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_AUX_A) |			\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
+	BIT(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
+	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
+#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
+	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
+	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
+	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
+	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
+	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
+	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
+	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
+	BIT(POWER_DOMAIN_INIT))
+
+static void skl_set_power_well(struct drm_i915_private *dev_priv,
+			struct i915_power_well *power_well, bool enable)
+{
+	uint32_t tmp, fuse_status;
+	uint32_t req_mask, state_mask;
+	bool check_fuse_status = false;
+
+	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+	fuse_status = I915_READ(SKL_FUSE_STATUS);
+
+	switch (power_well->data) {
+	case SKL_DISP_PW_1:
+		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+			SKL_FUSE_PG0_DIST_STATUS), 1)) {
+			DRM_ERROR("PG0 not enabled\n");
+			return;
+		}
+		break;
+	case SKL_DISP_PW_2:
+		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
+			DRM_ERROR("PG1 in disabled state\n");
+			return;
+		}
+		break;
+	case SKL_DISP_PW_DDI_A_E:
+	case SKL_DISP_PW_DDI_B:
+	case SKL_DISP_PW_DDI_C:
+	case SKL_DISP_PW_DDI_D:
+	case SKL_DISP_PW_MISC_IO:
+		break;
+	default:
+		WARN(1, "Unknown power well %lu\n", power_well->data);
+		return;
+	}
+
+	req_mask = SKL_POWER_WELL_REQ(power_well->data);
+	state_mask = SKL_POWER_WELL_STATE(power_well->data);
+
+	if (enable) {
+		if (!(tmp & req_mask)) {
+			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
+			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
+		}
+
+		if (!(tmp & state_mask)) {
+			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+				state_mask), 1))
+				DRM_ERROR("%s enable timeout\n",
+					power_well->name);
+			check_fuse_status = true;
+		}
+	} else {
+		if (tmp & req_mask) {
+			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
+			POSTING_READ(HSW_PWR_WELL_DRIVER);
+			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
+		}
+	}
+
+	if (check_fuse_status) {
+		if (power_well->data == SKL_DISP_PW_1) {
+			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+				SKL_FUSE_PG1_DIST_STATUS), 1))
+				DRM_ERROR("PG1 distributing status timeout\n");
+		} else if (power_well->data == SKL_DISP_PW_2) {
+			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
+				SKL_FUSE_PG2_DIST_STATUS), 1))
+				DRM_ERROR("PG2 distributing status timeout\n");
+		}
+	}
+}
+
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{

@@ -255,6 +385,36 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
	hsw_set_power_well(dev_priv, power_well, false);
}

+static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
+					struct i915_power_well *power_well)
+{
+	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
+		SKL_POWER_WELL_STATE(power_well->data);
+
+	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
+}
+
+static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
+				struct i915_power_well *power_well)
+{
+	skl_set_power_well(dev_priv, power_well, power_well->count > 0);
+
+	/* Clear any request made by BIOS as driver is taking over */
+	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+static void skl_power_well_enable(struct drm_i915_private *dev_priv,
+				struct i915_power_well *power_well)
+{
+	skl_set_power_well(dev_priv, power_well, true);
+}
+
+static void skl_power_well_disable(struct drm_i915_private *dev_priv,
+				struct i915_power_well *power_well)
+{
+	skl_set_power_well(dev_priv, power_well, false);
+}
+
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{

@@ -829,6 +989,13 @@ static const struct i915_power_well_ops hsw_power_well_ops = {
	.is_enabled = hsw_power_well_enabled,
};

+static const struct i915_power_well_ops skl_power_well_ops = {
+	.sync_hw = skl_power_well_sync_hw,
+	.enable = skl_power_well_enable,
+	.disable = skl_power_well_disable,
+	.is_enabled = skl_power_well_enabled,
+};
+
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",

@@ -1059,6 +1226,57 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
	return NULL;
}

+static struct i915_power_well skl_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+		.ops = &i9xx_always_on_power_well_ops,
+	},
+	{
+		.name = "power well 1",
+		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_1,
+	},
+	{
+		.name = "MISC IO power well",
+		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_MISC_IO,
+	},
+	{
+		.name = "power well 2",
+		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_2,
+	},
+	{
+		.name = "DDI A/E power well",
+		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_DDI_A_E,
+	},
+	{
+		.name = "DDI B power well",
+		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_DDI_B,
+	},
+	{
+		.name = "DDI C power well",
+		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_DDI_C,
+	},
+	{
+		.name = "DDI D power well",
+		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
+		.ops = &skl_power_well_ops,
+		.data = SKL_DISP_PW_DDI_D,
+	},
+};
+
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\

@@ -1085,6 +1303,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
+	} else if (IS_SKYLAKE(dev_priv->dev)) {
+		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
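The skl_power_well_enabled() helper added above checks both the request bit and the state bit for a well in HSW_PWR_WELL_DRIVER: a well only counts as on when the driver has requested it and the hardware reports it powered up. A reduced sketch, assuming the two-bits-per-well layout behind SKL_POWER_WELL_REQ/STATE:

#include <stdint.h>
#include <stdbool.h>

/* Per-well bit pair in the driver power-well register: state in the
 * even bit, driver request in the odd bit (layout per the macros). */
#define PW_STATE(id)	(1u << ((id) * 2))
#define PW_REQ(id)	(1u << (((id) * 2) + 1))

static bool power_well_enabled(uint32_t reg, unsigned id)
{
	uint32_t mask = PW_REQ(id) | PW_STATE(id);

	/* Enabled only when requested by us AND reported up by hw. */
	return (reg & mask) == mask;
}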
@@ -245,11 +245,11 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
		BUG();
	}

-	switch (obj->tiling_mode) {
-	case I915_TILING_NONE:
+	switch (fb->modifier[0]) {
+	case DRM_FORMAT_MOD_NONE:
		stride = fb->pitches[0] >> 6;
		break;
-	case I915_TILING_X:
+	case I915_FORMAT_MOD_X_TILED:
		plane_ctl |= PLANE_CTL_TILED_X;
		stride = fb->pitches[0] >> 9;
		break;

@@ -993,7 +993,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	mutex_lock(&dev->struct_mutex);
-	if (dev_priv->fbc.plane == intel_crtc->plane)
+	if (dev_priv->fbc.crtc == intel_crtc)
		intel_fbc_disable(dev);
	mutex_unlock(&dev->struct_mutex);

@@ -1076,7 +1076,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *fb = state->base.fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int crtc_x, crtc_y;
	unsigned int crtc_w, crtc_h;
	uint32_t src_x, src_y, src_w, src_h;

@@ -1106,16 +1105,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
		return -EINVAL;
	}

-	/* Sprite planes can be linear or x-tiled surfaces */
-	switch (obj->tiling_mode) {
-	case I915_TILING_NONE:
-	case I915_TILING_X:
-		break;
-	default:
-		DRM_DEBUG_KMS("Unsupported tiling mode\n");
-		return -EINVAL;
-	}
-
	/*
	 * FIXME the following code does a bunch of fuzzy adjustments to the
	 * coordinates and sizes. We probably need some way to decide whether
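The skl_update_plane() switch above now keys the stride off fb->modifier[0] rather than the BO's tiling_mode. The shifts encode the PLANE_STRIDE units: 64-byte chunks for linear surfaces, 512-byte tile rows for X-tiling, hence the >> 6 and >> 9. A compact sketch of that calculation:

#include <stdint.h>

#define LINEAR_STRIDE_SHIFT	6	/* pitch in 64-byte units */
#define X_TILE_STRIDE_SHIFT	9	/* pitch in 512-byte tile rows */

/* Convert a byte pitch to the stride units the plane register expects,
 * mirroring the DRM_FORMAT_MOD_NONE / I915_FORMAT_MOD_X_TILED cases. */
static uint32_t skl_plane_stride(uint32_t pitch_bytes, int x_tiled)
{
	return pitch_bytes >> (x_tiled ? X_TILE_STRIDE_SHIFT
				       : LINEAR_STRIDE_SHIFT);
}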
@ -23,6 +23,7 @@

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

@ -328,8 +329,9 @@ static void intel_uncore_ellc_detect(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
INTEL_INFO(dev)->gen >= 9) &&
(__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
/* The docs do not explain exactly how the calculation can be
 * made. It is somewhat guessable, but for now, it's always
 * 128MB.

@ -640,6 +642,14 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
GEN6_READ_HEADER(x); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \

@ -703,6 +713,10 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
GEN6_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)

@ -724,6 +738,7 @@ __gen6_read(64)
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

@ -807,6 +822,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
GEN6_WRITE_FOOTER; \
}

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
off_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
FORCEWAKE_MT,
GEN6_RPNSWREQ,

@ -924,12 +947,17 @@ __gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

@ -972,6 +1000,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
d->val_set = FORCEWAKE_KERNEL;
d->val_clear = 0;
} else {
/* WaRsClearFWBitsAtReset:bdw,skl */
d->val_reset = _MASKED_BIT_DISABLE(0xffff);
d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
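The _MASKED_BIT_* values above target "masked" registers: the top 16 bits of a write select which of the low 16 bits actually change, so individual bits can be flipped without a read-modify-write cycle. A sketch of the convention — the helper definitions follow i915's, but the software model of the hardware's write logic is purely illustrative:

```c
#include <stdint.h>
#include <stdio.h>

/* i915-style masked-register helpers: touching bit b also requires
 * setting bit (b + 16) as a write-enable mask. */
#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

/* Toy model of how hardware applies a masked write: only bits whose
 * mask bit is set are updated from the value half. */
static uint16_t masked_write(uint16_t cur, uint32_t val)
{
	uint16_t mask = val >> 16;

	return (cur & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t reg = 0x00f0;

	reg = masked_write(reg, MASKED_BIT_ENABLE(0x0001));  /* set bit 0 */
	reg = masked_write(reg, MASKED_BIT_DISABLE(0x0010)); /* clear bit 4 */
	printf("0x%04x\n", reg); /* prints 0x00e1 */
	return 0;
}
```

This is why val_reset above can clear all sixteen forcewake bits in one shot with _MASKED_BIT_DISABLE(0xffff).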
@ -1082,6 +1111,8 @@ void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

i915_check_vgpu(dev);

intel_uncore_ellc_detect(dev);
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);

@ -1130,6 +1161,11 @@ void intel_uncore_init(struct drm_device *dev)
break;
}

if (intel_vgpu_active(dev)) {
ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
ASSIGN_READ_MMIO_VFUNCS(vgpu);
}

i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
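A note on the structure here: the __vgpu_read/__vgpu_write macros stamp out one accessor per register width, and intel_uncore_init() then selects a vtable once at init, so the per-platform choice costs an indirect call rather than a runtime check on every MMIO access. The vGPU variants notably skip the forcewake dance, since the hypervisor mediates register access. A minimal standalone sketch of the pattern (all toy_* names are illustrative, not the driver's actual types):

```c
#include <stdint.h>
#include <stdio.h>

/* Toy device handle: 'mmio' stands in for the mapped register BAR. */
struct toy_priv { uint8_t *mmio; };

/* Stamp one accessor per register width, like __vgpu_read(x) above.
 * The real macros also expand GEN6_READ_HEADER/FOOTER for tracing;
 * the vGPU point is the raw read with no forcewake handling. */
#define TOY_VGPU_READ(x)						\
static uint##x##_t toy_vgpu_read##x(struct toy_priv *d, long reg)	\
{									\
	return *(uint##x##_t *)(d->mmio + reg);				\
}

TOY_VGPU_READ(16)
TOY_VGPU_READ(32)

/* Function-pointer slot filled at init, mirroring
 * ASSIGN_READ_MMIO_VFUNCS(vgpu) in intel_uncore_init(). */
struct toy_funcs {
	uint32_t (*mmio_readl)(struct toy_priv *, long);
};

int main(void)
{
	uint8_t bar[64] = { [4] = 0xef, [5] = 0xbe, [6] = 0xad, [7] = 0xde };
	struct toy_priv d = { .mmio = bar };
	struct toy_funcs f = { .mmio_readl = toy_vgpu_read32 };

	printf("reg 0x4 = 0x%08x\n", f.mmio_readl(&d, 4)); /* 0xdeadbeef on LE */
	(void)toy_vgpu_read16; /* unused in this sketch */
	return 0;
}
```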
@ -202,6 +202,7 @@ struct drm_framebuffer {
const struct drm_framebuffer_funcs *funcs;
unsigned int pitches[4];
unsigned int offsets[4];
uint64_t modifier[4];
unsigned int width;
unsigned int height;
/* depth can be 15 or 16 */

@ -1155,6 +1156,9 @@ struct drm_mode_config {
/* whether async page flip is supported or not */
bool async_page_flip;

/* whether the driver supports fb modifiers */
bool allow_fb_modifiers;

/* cursor size */
uint32_t cursor_width, cursor_height;
};
@ -259,21 +259,31 @@
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)

#define INTEL_SKL_IDS(info) \
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \

#define INTEL_SKL_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */

#define INTEL_SKL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */

#define INTEL_SKL_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \

#define INTEL_SKL_IDS(info) \
INTEL_SKL_GT1_IDS(info), \
INTEL_SKL_GT2_IDS(info), \
INTEL_SKL_GT3_IDS(info)

#endif /* _I915_PCIIDS_H */
@ -630,6 +630,7 @@ struct drm_gem_open {
 */
#define DRM_CAP_CURSOR_WIDTH 0x8
#define DRM_CAP_CURSOR_HEIGHT 0x9
#define DRM_CAP_ADDFB2_MODIFIERS 0x10

/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@ -129,4 +129,67 @@
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */

/*
 * Format Modifiers:
 *
 * Format modifiers describe, typically, a re-ordering or modification
 * of the data in a plane of an FB. This can be used to express tiled/
 * swizzled formats, or compression, or a combination of the two.
 *
 * The upper 8 bits of the format modifier are a vendor-id as assigned
 * below. The lower 56 bits are assigned as the vendor sees fit.
 */

/* Vendor Ids: */
#define DRM_FORMAT_MOD_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
#define DRM_FORMAT_MOD_VENDOR_NV 0x03
#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
/* add more to the end as needed */

#define fourcc_mod_code(vendor, val) \
((((u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffL))
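Because the split is a plain bit-field convention, modifier tokens can be built and picked apart with shifts and masks. A userspace-flavoured sketch of the same packing fourcc_mod_code performs (uint64_t stands in for the kernel's u64; mod_code and MOD_VENDOR_INTEL are illustrative names):

```c
#include <stdint.h>
#include <stdio.h>

#define MOD_VENDOR_INTEL 0x01ULL

/* Userspace mirror of fourcc_mod_code(): vendor id in the top
 * 8 bits, vendor-defined value in the low 56 bits. */
static uint64_t mod_code(uint64_t vendor, uint64_t val)
{
	return (vendor << 56) | (val & 0x00ffffffffffffffULL);
}

int main(void)
{
	uint64_t x_tiled = mod_code(MOD_VENDOR_INTEL, 1); /* == I915_FORMAT_MOD_X_TILED */

	printf("modifier 0x%016llx: vendor 0x%02llx, code %llu\n",
	       (unsigned long long)x_tiled,
	       (unsigned long long)(x_tiled >> 56),
	       (unsigned long long)(x_tiled & 0x00ffffffffffffffULL));
	return 0;
}
```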
/*
 * Format Modifier tokens:
 *
 * When adding a new token please document the layout with a code comment,
 * similar to the fourcc codes above. drm_fourcc.h is considered the
 * authoritative source for all of these.
 */

/* Intel framebuffer modifiers */

/*
 * Intel X-tiling layout
 *
 * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
 * 2Kb) in row-major layout. Within the tile bytes are laid out row-major, with
 * a platform-dependent stride. On top of that the memory can apply
 * platform-dependent swizzling of some higher address bits into bit6.
 *
 * This format is highly platform specific and not useful for cross-driver
 * sharing. It exists since on a given platform it does uniquely identify the
 * layout in a simple way for i915-specific userspace.
 */
#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)

/*
 * Intel Y-tiling layout
 *
 * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
 * 2Kb) in row-major layout. Within the tile bytes are laid out in OWORD (16
 * bytes) chunks column-major, with a platform-dependent height. On top of that
 * the memory can apply platform-dependent swizzling of some higher address
 * bits into bit6.
 *
 * This format is highly platform specific and not useful for cross-driver
 * sharing. It exists since on a given platform it does uniquely identify the
 * layout in a simple way for i915-specific userspace.
 */
#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)

#endif /* DRM_FOURCC_H */
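The X-tiling description maps to straightforward address arithmetic once a concrete geometry is fixed. A sketch, assuming the common 512-byte-wide by 8-row 4Kb X-tile and ignoring the platform-dependent bit6 swizzling mentioned above; since the real layout varies per platform, treat this purely as illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Linear (x, y) -> byte offset into an X-tiled buffer, assuming
 * 4Kb tiles of 512 bytes x 8 rows and no swizzling. cpp = bytes
 * per pixel, stride = row pitch in bytes (assumed to be a whole
 * number of 512-byte tile columns). */
static uint32_t x_tiled_offset(uint32_t x, uint32_t y,
			       uint32_t stride, uint32_t cpp)
{
	uint32_t xb = x * cpp;            /* horizontal byte position */
	uint32_t tile_row = y / 8;
	uint32_t tile_col = xb / 512;
	uint32_t tiles_per_row = stride / 512;
	uint32_t tile_base = (tile_row * tiles_per_row + tile_col) * 4096;

	/* Within the tile, bytes are row-major: 8 rows of 512 bytes. */
	return tile_base + (y % 8) * 512 + (xb % 512);
}

int main(void)
{
	/* Pixel (130, 9) of a 4-bytes-per-pixel surface with a
	 * 2048-byte stride lands in tile row 1, tile column 1. */
	printf("offset = %u\n", x_tiled_offset(130, 9, 2048, 4));
	return 0;
}
```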
@ -336,6 +336,7 @@ struct drm_mode_fb_cmd {
};

#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */

struct drm_mode_fb_cmd2 {
__u32 fb_id;

@ -356,10 +357,18 @@ struct drm_mode_fb_cmd2 {
 * So it would consist of Y as offsets[0] and UV as
 * offsets[1]. Note that offsets[0] will generally
 * be 0 (but this is not required).
 *
 * To accommodate tiled, compressed, etc. formats, a per-plane
 * modifier can be specified. The default value of zero
 * indicates "native" format as specified by the fourcc.
 * Vendor specific modifier token. This allows, for example,
 * different tiling/swizzling patterns on different planes.
 * See discussion above of DRM_FORMAT_MOD_xxx.
 */
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
__u64 modifier[4]; /* ie, tiling, compressed (per plane) */
};
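Putting the UAPI pieces together: userspace checks DRM_CAP_ADDFB2_MODIFIERS (only reported once the driver sets allow_fb_modifiers), then passes DRM_MODE_FB_MODIFIERS and a per-plane modifier[] to ADDFB2. A hedged sketch of the sequence; error handling is trimmed, header install paths may differ (e.g. under libdrm/), and fd/bo_handle are assumed to come from an earlier, driver-specific open and buffer allocation:

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>
#include <drm/drm_fourcc.h>

/* Create an X-tiled XRGB8888 framebuffer through ADDFB2. */
static int add_xtiled_fb(int fd, uint32_t bo_handle,
			 uint32_t width, uint32_t height,
			 uint32_t pitch, uint32_t *out_fb_id)
{
	struct drm_get_cap cap = { .capability = DRM_CAP_ADDFB2_MODIFIERS };
	struct drm_mode_fb_cmd2 f;

	/* The driver must advertise modifier support first. */
	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) || !cap.value)
		return -1;

	memset(&f, 0, sizeof(f));
	f.width = width;
	f.height = height;
	f.pixel_format = DRM_FORMAT_XRGB8888;
	f.flags = DRM_MODE_FB_MODIFIERS;	/* makes the kernel honour modifier[] */
	f.handles[0] = bo_handle;
	f.pitches[0] = pitch;
	f.modifier[0] = I915_FORMAT_MOD_X_TILED;

	if (ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &f))
		return -1;

	*out_fb_id = f.fb_id;
	return 0;
}
```

Leaving flags at zero keeps the old behaviour: modifier[] is rejected unless it is all zeroes, which is what the framebuffer_check() hunk earlier in this commit enforces.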
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01