Merge branch 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Daniel writes:
"First -next pull for 3.7. Highlights:
- hsw hdmi improvements (Paulo)
- ips/rps locking rework and cleanups
- rc6 on ilk by default again
- hw context&dp&dpff support for hsw (Ben)
- GET_PARAM_HAS_SEMAPHORES (Chris)
- gen6+ pipe_control improvements (Chris)
- set_cacheing ioctl and assorted support code (Chris)
- cleanups around the busy/idle/pm code (Chris&me)
- flushing_list removal, hopefully for good (Chris)
- read_reg ioctl (Ben)
- support the ns2501 dvo (Thomas Richter)
- avoid the costly gen6+ "missed IRQ" workaround where we don't need a
  race-free seqno readback (Chris)
- various bits&pieces, mostly early patches from the modeset rework branch"

* 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel: (54 commits)
  drm/i915: don't grab dev->struct_mutex for userspace forcewake
  drm/i915: try harder to find WR PLL clock settings
  drm/i915: use the correct encoder type when comparing
  drm/i915: Lazily apply the SNB+ seqno w/a
  drm/i915: enable rc6 on ilk again
  drm/i915: fix up ilk drps/ips locking
  drm/i915: DE_PCU_EVENT irq is ilk-only
  drm/i915: kill dev_priv->mchdev_lock
  drm/i915: move all rps state into dev_priv->rps
  drm/i915: use mutex_lock_interruptible for debugfs files
  drm/i915: fixup up debugfs rps state handling
  drm/i915: properly guard ilk ips state
  drm/i915: add parentheses around PIXCLK_GATE definitions
  drm/i915: reindent Haswell register definitions
  drm/i915: completely reset the value of DDI_FUNC_CTL
  drm/i915: correctly set the DDI_FUNC_CTL bpc field
  drm/i915: set the DDI sync polarity bits
  drm/i915: fix pipe DDI mode select
  drm/i915: dump the device info
  drm/i915: fixup desired rps frequency computation
  ...
commit 269b62db0e
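One of the highlights above is the new GET_PARAM_HAS_SEMAPHORES getparam. A minimal userspace sketch of querying it through libdrm follows; this is not part of the commit itself, and the helper name is made up for illustration — only drmIoctl, DRM_IOCTL_I915_GETPARAM and I915_PARAM_HAS_SEMAPHORES come from the real interface.

    /* Hypothetical userspace sketch: probe the new parameter via libdrm. */
    #include <string.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    int i915_has_semaphores(int fd)
    {
            drm_i915_getparam_t gp;
            int value = 0;

            memset(&gp, 0, sizeof(gp));
            gp.param = I915_PARAM_HAS_SEMAPHORES;   /* added by this pull */
            gp.value = &value;

            /* Kernels without the parameter fail the ioctl with -EINVAL. */
            if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return 0;

            return value;
    }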
drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  dvo_ivch.o \
 	  dvo_tfp410.o \
 	  dvo_sil164.o \
+	  dvo_ns2501.o \
 	  i915_gem_dmabuf.o

 i915-$(CONFIG_COMPAT) += i915_ioc32.o
drivers/gpu/drm/i915/dvo.h
@@ -140,5 +140,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops;
 extern struct intel_dvo_dev_ops ivch_ops;
 extern struct intel_dvo_dev_ops tfp410_ops;
 extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;

 #endif /* _INTEL_DVO_H */
drivers/gpu/drm/i915/dvo_ns2501.c (new file, 582 lines)
@@ -0,0 +1,582 @@
/*
 *
 * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "dvo.h"
#include "i915_reg.h"
#include "i915_drv.h"

#define NS2501_VID 0x1305
#define NS2501_DID 0x6726

#define NS2501_VID_LO 0x00
#define NS2501_VID_HI 0x01
#define NS2501_DID_LO 0x02
#define NS2501_DID_HI 0x03
#define NS2501_REV 0x04
#define NS2501_RSVD 0x05
#define NS2501_FREQ_LO 0x06
#define NS2501_FREQ_HI 0x07

#define NS2501_REG8 0x08
#define NS2501_8_VEN (1<<5)
#define NS2501_8_HEN (1<<4)
#define NS2501_8_DSEL (1<<3)
#define NS2501_8_BPAS (1<<2)
#define NS2501_8_RSVD (1<<1)
#define NS2501_8_PD (1<<0)

#define NS2501_REG9 0x09
#define NS2501_9_VLOW (1<<7)
#define NS2501_9_MSEL_MASK (0x7<<4)
#define NS2501_9_TSEL (1<<3)
#define NS2501_9_RSEN (1<<2)
#define NS2501_9_RSVD (1<<1)
#define NS2501_9_MDI (1<<0)

#define NS2501_REGC 0x0c

struct ns2501_priv {
	//I2CDevRec d;
	bool quiet;
	int reg_8_shadow;
	int reg_8_set;
	// Shadow registers for i915
	int dvoc;
	int pll_a;
	int srcdim;
	int fw_blc;
};

#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))

/*
 * Include the PLL launcher prototype
 */
extern void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);

/*
 * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens
 * laptops does not react on the i2c bus unless
 * both the PLL is running and the display is configured in its native
 * resolution.
 * This function forces the DVO on, and stores the registers it touches.
 * Afterwards, registers are restored to regular values.
 *
 * This is pretty much a hack, though it works.
 * Without that, ns2501_readb and ns2501_writeb fail
 * when switching the resolution.
 */

static void enable_dvo(struct intel_dvo_device *dvo)
{
	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
	struct i2c_adapter *adapter = dvo->i2c_bus;
	struct intel_gmbus *bus = container_of(adapter,
					       struct intel_gmbus,
					       adapter);
	struct drm_i915_private *dev_priv = bus->dev_priv;

	DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);

	ns->dvoc = I915_READ(DVO_C);
	ns->pll_a = I915_READ(_DPLL_A);
	ns->srcdim = I915_READ(DVOC_SRCDIM);
	ns->fw_blc = I915_READ(FW_BLC);

	I915_WRITE(DVOC, 0x10004084);
	I915_WRITE(_DPLL_A, 0xd0820000);
	I915_WRITE(DVOC_SRCDIM, 0x400300);	// 1024x768
	I915_WRITE(FW_BLC, 0x1080304);

	intel_enable_pll(dev_priv, 0);

	I915_WRITE(DVOC, 0x90004084);
}

/*
 * Restore the I915 registers modified by the above
 * trigger function.
 */
static void restore_dvo(struct intel_dvo_device *dvo)
{
	struct i2c_adapter *adapter = dvo->i2c_bus;
	struct intel_gmbus *bus = container_of(adapter,
					       struct intel_gmbus,
					       adapter);
	struct drm_i915_private *dev_priv = bus->dev_priv;
	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);

	I915_WRITE(DVOC, ns->dvoc);
	I915_WRITE(_DPLL_A, ns->pll_a);
	I915_WRITE(DVOC_SRCDIM, ns->srcdim);
	I915_WRITE(FW_BLC, ns->fw_blc);
}

/*
** Read a register from the ns2501.
** Returns true if successful, false otherwise.
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
	struct ns2501_priv *ns = dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
	u8 out_buf[2];
	u8 in_buf[2];

	struct i2c_msg msgs[] = {
		{
			.addr = dvo->slave_addr,
			.flags = 0,
			.len = 1,
			.buf = out_buf,
		},
		{
			.addr = dvo->slave_addr,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = in_buf,
		}
	};

	out_buf[0] = addr;
	out_buf[1] = 0;

	if (i2c_transfer(adapter, msgs, 2) == 2) {
		*ch = in_buf[0];
		return true;
	}

	if (!ns->quiet) {
		DRM_DEBUG_KMS
		    ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
		     adapter->name, dvo->slave_addr);
	}

	return false;
}

/*
** Write a register to the ns2501.
** Returns true if successful, false otherwise.
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
	struct ns2501_priv *ns = dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
	uint8_t out_buf[2];

	struct i2c_msg msg = {
		.addr = dvo->slave_addr,
		.flags = 0,
		.len = 2,
		.buf = out_buf,
	};

	out_buf[0] = addr;
	out_buf[1] = ch;

	if (i2c_transfer(adapter, &msg, 1) == 1) {
		return true;
	}

	if (!ns->quiet) {
		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
			      addr, adapter->name, dvo->slave_addr);
	}

	return false;
}

/* National Semiconductor 2501 driver for chip on i2c bus
 * scan for the chip on the bus.
 * Hope the VBIOS initialized the PLL correctly so we can
 * talk to it. If not, it will not be seen and not detected.
 * Bummer!
 */
static bool ns2501_init(struct intel_dvo_device *dvo,
			struct i2c_adapter *adapter)
{
	/* this will detect the NS2501 chip on the specified i2c bus */
	struct ns2501_priv *ns;
	unsigned char ch;

	ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
	if (ns == NULL)
		return false;

	dvo->i2c_bus = adapter;
	dvo->dev_priv = ns;
	ns->quiet = true;

	if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
		goto out;

	if (ch != (NS2501_VID & 0xff)) {
		DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
			      ch, adapter->name, dvo->slave_addr);
		goto out;
	}

	if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
		goto out;

	if (ch != (NS2501_DID & 0xff)) {
		DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
			      ch, adapter->name, dvo->slave_addr);
		goto out;
	}
	ns->quiet = false;
	ns->reg_8_set = 0;
	ns->reg_8_shadow =
	    NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;

	DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
	return true;

out:
	kfree(ns);
	return false;
}

static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
{
	/*
	 * This is a Laptop display, it doesn't have hotplugging.
	 * Even if not, the detection bit of the 2501 is unreliable as
	 * it only works for some display types.
	 * It is even more unreliable as the PLL must be active for
	 * allowing reading from the chip.
	 */
	return connector_status_connected;
}

static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
					      struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS
	    ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
	     __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
	     mode->vtotal);

	/*
	 * Currently, these are all the modes I have data from.
	 * More might exist. Unclear how to find the native resolution
	 * of the panel in here so we could always accept it
	 * by disabling the scaler.
	 */
	if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
	    (mode->hdisplay == 640 && mode->vdisplay == 480) ||
	    (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
		return MODE_OK;
	} else {
		return MODE_ONE_SIZE;	/* Is this a reasonable error? */
	}
}

static void ns2501_mode_set(struct intel_dvo_device *dvo,
			    struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode)
{
	bool ok;
	bool restore = false;
	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);

	DRM_DEBUG_KMS
	    ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
	     __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
	     mode->vtotal);

	/*
	 * Where do I find the native resolution for which scaling is not required???
	 *
	 * First trigger the DVO on as otherwise the chip does not appear on the i2c
	 * bus.
	 */
	do {
		ok = true;

		if (mode->hdisplay == 800 && mode->vdisplay == 600) {
			/* mode 277 */
			ns->reg_8_shadow &= ~NS2501_8_BPAS;
			DRM_DEBUG_KMS("%s: switching to 800x600\n",
				      __FUNCTION__);

			/*
			 * No, I do not know where this data comes from.
			 * It is just what the video bios left in the DVO, so
			 * I'm just copying it here over.
			 * This also means that I cannot support any other modes
			 * except the ones supported by the bios.
			 */
			ok &= ns2501_writeb(dvo, 0x11, 0xc8);	// 0xc7 also works.
			ok &= ns2501_writeb(dvo, 0x1b, 0x19);
			ok &= ns2501_writeb(dvo, 0x1c, 0x62);	// VBIOS left 0x64 here, but 0x62 works nicer
			ok &= ns2501_writeb(dvo, 0x1d, 0x02);

			ok &= ns2501_writeb(dvo, 0x34, 0x03);
			ok &= ns2501_writeb(dvo, 0x35, 0xff);

			ok &= ns2501_writeb(dvo, 0x80, 0x27);
			ok &= ns2501_writeb(dvo, 0x81, 0x03);
			ok &= ns2501_writeb(dvo, 0x82, 0x41);
			ok &= ns2501_writeb(dvo, 0x83, 0x05);

			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
			ok &= ns2501_writeb(dvo, 0x8e, 0x04);
			ok &= ns2501_writeb(dvo, 0x8f, 0x00);

			ok &= ns2501_writeb(dvo, 0x90, 0xfe);	/* vertical. VBIOS left 0xff here, but 0xfe works better */
			ok &= ns2501_writeb(dvo, 0x91, 0x07);
			ok &= ns2501_writeb(dvo, 0x94, 0x00);
			ok &= ns2501_writeb(dvo, 0x95, 0x00);

			ok &= ns2501_writeb(dvo, 0x96, 0x00);

			ok &= ns2501_writeb(dvo, 0x99, 0x00);
			ok &= ns2501_writeb(dvo, 0x9a, 0x88);

			ok &= ns2501_writeb(dvo, 0x9c, 0x23);	/* Looks like first and last line of the image. */
			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
			ok &= ns2501_writeb(dvo, 0x9f, 0x03);

			ok &= ns2501_writeb(dvo, 0xa4, 0x80);

			ok &= ns2501_writeb(dvo, 0xb6, 0x00);

			ok &= ns2501_writeb(dvo, 0xb9, 0xc8);	/* horizontal? */
			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */

			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
			ok &= ns2501_writeb(dvo, 0xc1, 0xd7);

			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
			ok &= ns2501_writeb(dvo, 0xc3, 0xf8);

			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
			ok &= ns2501_writeb(dvo, 0xc5, 0x1a);

			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
			ok &= ns2501_writeb(dvo, 0xc7, 0x73);
			ok &= ns2501_writeb(dvo, 0xc8, 0x02);

		} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
			/* mode 274 */
			DRM_DEBUG_KMS("%s: switching to 640x480\n",
				      __FUNCTION__);
			/*
			 * No, I do not know where this data comes from.
			 * It is just what the video bios left in the DVO, so
			 * I'm just copying it here over.
			 * This also means that I cannot support any other modes
			 * except the ones supported by the bios.
			 */
			ns->reg_8_shadow &= ~NS2501_8_BPAS;

			ok &= ns2501_writeb(dvo, 0x11, 0xa0);
			ok &= ns2501_writeb(dvo, 0x1b, 0x11);
			ok &= ns2501_writeb(dvo, 0x1c, 0x54);
			ok &= ns2501_writeb(dvo, 0x1d, 0x03);

			ok &= ns2501_writeb(dvo, 0x34, 0x03);
			ok &= ns2501_writeb(dvo, 0x35, 0xff);

			ok &= ns2501_writeb(dvo, 0x80, 0xff);
			ok &= ns2501_writeb(dvo, 0x81, 0x07);
			ok &= ns2501_writeb(dvo, 0x82, 0x3d);
			ok &= ns2501_writeb(dvo, 0x83, 0x05);

			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
			ok &= ns2501_writeb(dvo, 0x8e, 0x10);
			ok &= ns2501_writeb(dvo, 0x8f, 0x00);

			ok &= ns2501_writeb(dvo, 0x90, 0xff);	/* vertical */
			ok &= ns2501_writeb(dvo, 0x91, 0x07);
			ok &= ns2501_writeb(dvo, 0x94, 0x00);
			ok &= ns2501_writeb(dvo, 0x95, 0x00);

			ok &= ns2501_writeb(dvo, 0x96, 0x05);

			ok &= ns2501_writeb(dvo, 0x99, 0x00);
			ok &= ns2501_writeb(dvo, 0x9a, 0x88);

			ok &= ns2501_writeb(dvo, 0x9c, 0x24);
			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
			ok &= ns2501_writeb(dvo, 0x9f, 0x03);

			ok &= ns2501_writeb(dvo, 0xa4, 0x84);

			ok &= ns2501_writeb(dvo, 0xb6, 0x09);

			ok &= ns2501_writeb(dvo, 0xb9, 0xa0);	/* horizontal? */
			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */

			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
			ok &= ns2501_writeb(dvo, 0xc1, 0x90);

			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
			ok &= ns2501_writeb(dvo, 0xc3, 0x0f);

			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
			ok &= ns2501_writeb(dvo, 0xc5, 0x16);

			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
			ok &= ns2501_writeb(dvo, 0xc7, 0x02);
			ok &= ns2501_writeb(dvo, 0xc8, 0x02);

		} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
			/* mode 280 */
			DRM_DEBUG_KMS("%s: switching to 1024x768\n",
				      __FUNCTION__);
			/*
			 * This might or might not work, actually. I'm silently
			 * assuming here that the native panel resolution is
			 * 1024x768. If not, then this leaves the scaler disabled
			 * generating a picture that is likely not the expected.
			 *
			 * Problem is that I do not know where to take the panel
			 * dimensions from.
			 *
			 * Enable the bypass, scaling not required.
			 *
			 * The scaler registers are irrelevant here....
			 *
			 */
			ns->reg_8_shadow |= NS2501_8_BPAS;
			ok &= ns2501_writeb(dvo, 0x37, 0x44);
		} else {
			/*
			 * Data not known. Bummer!
			 * Hopefully, the code should not go here
			 * as mode_OK delivered no other modes.
			 */
			ns->reg_8_shadow |= NS2501_8_BPAS;
		}
		ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);

		if (!ok) {
			if (restore)
				restore_dvo(dvo);
			enable_dvo(dvo);
			restore = true;
		}
	} while (!ok);
	/*
	 * Restore the old i915 registers before
	 * forcing the ns2501 on.
	 */
	if (restore)
		restore_dvo(dvo);
}

/* set the NS2501 power state */
static void ns2501_dpms(struct intel_dvo_device *dvo, int mode)
{
	bool ok;
	bool restore = false;
	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
	unsigned char ch;

	DRM_DEBUG_KMS("%s: Trying to set the dpms of the DVO to %d\n",
		      __FUNCTION__, mode);

	ch = ns->reg_8_shadow;

	if (mode == DRM_MODE_DPMS_ON)
		ch |= NS2501_8_PD;
	else
		ch &= ~NS2501_8_PD;

	if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
		ns->reg_8_set = 1;
		ns->reg_8_shadow = ch;

		do {
			ok = true;
			ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
			ok &=
			    ns2501_writeb(dvo, 0x34,
					  (mode ==
					   DRM_MODE_DPMS_ON) ? (0x03) : (0x00));
			ok &=
			    ns2501_writeb(dvo, 0x35,
					  (mode ==
					   DRM_MODE_DPMS_ON) ? (0xff) : (0x00));
			if (!ok) {
				if (restore)
					restore_dvo(dvo);
				enable_dvo(dvo);
				restore = true;
			}
		} while (!ok);

		if (restore)
			restore_dvo(dvo);
	}
}

static void ns2501_dump_regs(struct intel_dvo_device *dvo)
{
	uint8_t val;

	ns2501_readb(dvo, NS2501_FREQ_LO, &val);
	DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
	ns2501_readb(dvo, NS2501_FREQ_HI, &val);
	DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
	ns2501_readb(dvo, NS2501_REG8, &val);
	DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
	ns2501_readb(dvo, NS2501_REG9, &val);
	DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
	ns2501_readb(dvo, NS2501_REGC, &val);
	DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
}

static void ns2501_destroy(struct intel_dvo_device *dvo)
{
	struct ns2501_priv *ns = dvo->dev_priv;

	if (ns) {
		kfree(ns);
		dvo->dev_priv = NULL;
	}
}

struct intel_dvo_dev_ops ns2501_ops = {
	.init = ns2501_init,
	.detect = ns2501_detect,
	.mode_valid = ns2501_mode_valid,
	.mode_set = ns2501_mode_set,
	.dpms = ns2501_dpms,
	.dump_regs = ns2501_dump_regs,
	.destroy = ns2501_destroy,
};
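The ops table above is consumed by the generic DVO driver list in intel_dvo.c; that hunk is not part of this excerpt, so the sketch below is an assumption about the hookup (the slave address and field values are illustrative, not quoted from the commit):

	/* Sketch of the intel_dvo.c hookup (illustrative, not from this diff). */
	static const struct intel_dvo_device intel_dvo_devices[] = {
		/* ... existing sil164/ch7xxx/ivch/tfp410/ch7017 entries ... */
		{
			.type = INTEL_DVO_CHIP_LVDS,
			.name = "ns2501",
			.dvo_reg = DVOC,
			.slave_addr = 0x38,	/* assumed NS2501 i2c address */
			.dev_ops = &ns2501_ops,
		},
	};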
drivers/gpu/drm/i915/i915_debugfs.c
@@ -44,7 +44,6 @@

 enum {
 	ACTIVE_LIST,
-	FLUSHING_LIST,
 	INACTIVE_LIST,
 	PINNED_LIST,
 };
@@ -62,28 +61,11 @@ static int i915_capabilities(struct seq_file *m, void *data)

 	seq_printf(m, "gen: %d\n", info->gen);
 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
-#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
-	B(is_mobile);
-	B(is_i85x);
-	B(is_i915g);
-	B(is_i945gm);
-	B(is_g33);
-	B(need_gfx_hws);
-	B(is_g4x);
-	B(is_pineview);
-	B(is_broadwater);
-	B(is_crestline);
-	B(has_fbc);
-	B(has_pipe_cxsr);
-	B(has_hotplug);
-	B(cursor_needs_physical);
-	B(has_overlay);
-	B(overlay_needs_physical);
-	B(supports_tv);
-	B(has_bsd_ring);
-	B(has_blt_ring);
-	B(has_llc);
-#undef B
+#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+#define DEV_INFO_SEP ;
+	DEV_INFO_FLAGS;
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP

 	return 0;
 }
@@ -121,14 +103,15 @@ static const char *cache_level_str(int type)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain,
-		   obj->last_rendering_seqno,
+		   obj->last_read_seqno,
+		   obj->last_write_seqno,
 		   obj->last_fenced_seqno,
 		   cache_level_str(obj->cache_level),
 		   obj->dirty ? " dirty" : "",
@@ -177,10 +160,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 		seq_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
-	case FLUSHING_LIST:
-		seq_printf(m, "Flushing:\n");
-		head = &dev_priv->mm.flushing_list;
-		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
@@ -238,7 +217,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data)

 	size = count = mappable_size = mappable_count = 0;
 	count_objects(&dev_priv->mm.active_list, mm_list);
-	count_objects(&dev_priv->mm.flushing_list, mm_list);
 	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);

@@ -413,7 +391,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
 {
 	if (ring->get_seqno) {
 		seq_printf(m, "Current sequence (%s): %d\n",
-			   ring->name, ring->get_seqno(ring));
+			   ring->name, ring->get_seqno(ring, false));
 	}
 }

@@ -630,12 +608,12 @@ static void print_error_buffers(struct seq_file *m,
 	seq_printf(m, "%s [%d]:\n", name, count);

 	while (count--) {
-		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+		seq_printf(m, "  %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
 			   err->write_domain,
-			   err->seqno,
+			   err->rseqno, err->wseqno,
 			   pin_flag(err->pinned),
 			   tiling_flag(err->tiling),
 			   dirty_flag(err->dirty),
@@ -799,10 +777,14 @@ i915_error_state_write(struct file *filp,
 	struct seq_file *m = filp->private_data;
 	struct i915_error_state_file_priv *error_priv = m->private;
 	struct drm_device *dev = error_priv->dev;
+	int ret;

 	DRM_DEBUG_DRIVER("Resetting error state\n");

-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	i915_destroy_error_state(dev);
 	mutex_unlock(&dev->struct_mutex);

@@ -1292,7 +1274,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)

 	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

-	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	for (gpu_freq = dev_priv->rps.min_delay;
+	     gpu_freq <= dev_priv->rps.max_delay;
 	     gpu_freq++) {
 		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
 		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1472,8 +1455,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

-	mutex_lock(&dev->struct_mutex);
 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
@@ -1674,7 +1661,7 @@ i915_ring_stop_write(struct file *filp,
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 0;
+	int val = 0, ret;

 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1689,7 +1676,10 @@ i915_ring_stop_write(struct file *filp,

 	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);

-	mutex_lock(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	dev_priv->stop_rings = val;
 	mutex_unlock(&dev->struct_mutex);

@@ -1713,10 +1703,18 @@ i915_max_freq_read(struct file *filp,
 	struct drm_device *dev = filp->private_data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
-	int len;
+	int len, ret;

+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	len = snprintf(buf, sizeof(buf),
-		       "max freq: %d\n", dev_priv->max_delay * 50);
+		       "max freq: %d\n", dev_priv->rps.max_delay * 50);
+	mutex_unlock(&dev->struct_mutex);

 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1733,7 +1731,10 @@ i915_max_freq_write(struct file *filp,
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 1;
+	int val = 1, ret;

+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1748,12 +1749,17 @@ i915_max_freq_write(struct file *filp,

 	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	/*
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
-	dev_priv->max_delay = val / 50;
+	dev_priv->rps.max_delay = val / 50;

 	gen6_set_rps(dev, val / 50);
+	mutex_unlock(&dev->struct_mutex);

 	return cnt;
 }
@@ -1773,10 +1779,18 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
 	struct drm_device *dev = filp->private_data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
-	int len;
+	int len, ret;

+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	len = snprintf(buf, sizeof(buf),
-		       "min freq: %d\n", dev_priv->min_delay * 50);
+		       "min freq: %d\n", dev_priv->rps.min_delay * 50);
+	mutex_unlock(&dev->struct_mutex);

 	if (len > sizeof(buf))
 		len = sizeof(buf);
@@ -1791,7 +1805,10 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	struct drm_device *dev = filp->private_data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	char buf[20];
-	int val = 1;
+	int val = 1, ret;

+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
@@ -1806,12 +1823,17 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,

 	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);

+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	/*
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
-	dev_priv->min_delay = val / 50;
+	dev_priv->rps.min_delay = val / 50;

 	gen6_set_rps(dev, val / 50);
+	mutex_unlock(&dev->struct_mutex);

 	return cnt;
 }
@@ -1834,9 +1856,15 @@ i915_cache_sharing_read(struct file *filp,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[80];
 	u32 snpcr;
-	int len;
+	int len, ret;

+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

-	mutex_lock(&dev_priv->dev->struct_mutex);
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 	mutex_unlock(&dev_priv->dev->struct_mutex);

@@ -1862,6 +1890,9 @@ i915_cache_sharing_write(struct file *filp,
 	u32 snpcr;
 	int val = 1;

+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
 	if (cnt > 0) {
 		if (cnt > sizeof(buf) - 1)
 			return -EINVAL;
@@ -1925,16 +1956,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;

 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;

-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	gen6_gt_force_wake_get(dev_priv);
-	mutex_unlock(&dev->struct_mutex);

 	return 0;
 }
@@ -1947,16 +1973,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;

-	/*
-	 * It's bad that we can potentially hang userspace if struct_mutex gets
-	 * forever stuck. However, if we cannot acquire this lock it means that
-	 * almost certainly the driver has hung, is not unload-able. Therefore
-	 * hanging here is probably a minor inconvenience not to be seen my
-	 * almost every user.
-	 */
-	mutex_lock(&dev->struct_mutex);
 	gen6_gt_force_wake_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);

 	return 0;
 }
@@ -2006,7 +2023,6 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
@@ -2067,6 +2083,7 @@ int i915_debugfs_init(struct drm_minor *minor)
 				  &i915_cache_sharing_fops);
 	if (ret)
 		return ret;
+
 	ret = i915_debugfs_create(minor->debugfs_root, minor,
 				  "i915_ring_stop",
 				  &i915_ring_stop_fops);
drivers/gpu/drm/i915/i915_dma.c
@@ -1009,6 +1009,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_WAIT_TIMEOUT:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_SEMAPHORES:
+		value = i915_semaphore_is_enabled(dev);
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -1425,6 +1428,21 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 	kfree(ap);
 }

+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+	const struct intel_device_info *info = dev_priv->info;
+
+#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
+#define DEV_INFO_SEP ,
+	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			 info->gen,
+			 dev_priv->dev->pdev->device,
+			 DEV_INFO_FLAGS);
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1449,7 +1467,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;

-
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
@@ -1465,6 +1482,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->dev = dev;
 	dev_priv->info = info;

+	i915_dump_device_info(dev_priv);
+
 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
 		goto free_priv;
@@ -1586,7 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	spin_lock_init(&dev_priv->rps_lock);
+	spin_lock_init(&dev_priv->rps.lock);

 	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 		dev_priv->num_pipe = 3;
@@ -1835,6 +1854,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHEING, i915_gem_set_cacheing_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHEING, i915_gem_get_cacheing_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -1857,6 +1878,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
 };

 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
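The DEV_INFO_FLAG/DEV_INFO_SEP pair used in i915_dump_device_info above is an X-macro: the flag list is written once (DEV_INFO_FLAGS in i915_drv.h, see below) and each consumer re-defines the two hooks to expand it differently. A minimal standalone illustration follows; the names are generic and not kernel code:

	/* Minimal standalone illustration of the X-macro pattern. */
	#include <stdio.h>

	#define FLAGS \
		FLAG(is_mobile) SEP \
		FLAG(has_llc)

	struct info { int is_mobile, has_llc; };

	static void dump(const struct info *info)
	{
		/* Expand the list into a printf argument pack. */
	#define FLAG(name) info->name ? #name "," : ""
	#define SEP ,
		printf("flags=%s%s\n", FLAGS);
	#undef FLAG
	#undef SEP
	}

	int main(void)
	{
		struct info i = { .is_mobile = 1, .has_llc = 0 };
		dump(&i);	/* prints: flags=is_mobile, */
		return 0;
	}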
drivers/gpu/drm/i915/i915_drv.c
@@ -1060,7 +1060,7 @@ static bool IS_DISPLAYREG(u32 reg)
 	 * This should make it easier to transition modules over to the
 	 * new register block scheme, since we can do it incrementally.
 	 */
-	if (reg >= 0x180000)
+	if (reg >= VLV_DISPLAY_BASE)
 		return false;

 	if (reg >= RENDER_RING_BASE &&
@@ -1180,3 +1180,49 @@ __i915_write(16, w)
 __i915_write(32, l)
 __i915_write(64, q)
 #undef __i915_write
+
+static const struct register_whitelist {
+	uint64_t offset;
+	uint32_t size;
+	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+} whitelist[] = {
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+			void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reg_read *reg = data;
+	struct register_whitelist const *entry = whitelist;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+		if (entry->offset == reg->offset &&
+		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(whitelist))
+		return -EINVAL;
+
+	switch (entry->size) {
+	case 8:
+		reg->val = I915_READ64(reg->offset);
+		break;
+	case 4:
+		reg->val = I915_READ(reg->offset);
+		break;
+	case 2:
+		reg->val = I915_READ16(reg->offset);
+		break;
+	case 1:
+		reg->val = I915_READ8(reg->offset);
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
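From userspace, the new ioctl takes a struct drm_i915_reg_read with the register offset filled in and returns the value; only whitelisted offsets are accepted (at this point just the render ring timestamp). A hedged sketch, assuming a libdrm/i915_drm.h recent enough to carry the new ioctl definitions:

	/* Hedged userspace sketch (not in this diff): read the render ring
	 * timestamp via the new whitelisted register-read ioctl. */
	#include <stdint.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	int read_render_timestamp(int fd, uint64_t *ts)
	{
		struct drm_i915_reg_read reg = {
			.offset = 0x2358,	/* RING_TIMESTAMP(RENDER_RING_BASE) */
		};

		/* -EINVAL means the offset is not whitelisted for this gen. */
		if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg))
			return -1;

		*ts = reg.val;
		return 0;
	}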
drivers/gpu/drm/i915/i915_drv.h
@@ -109,6 +109,7 @@ struct intel_pch_pll {

 #define WATCH_COHERENCY	0
 #define WATCH_LISTS	0
+#define WATCH_GTT	0

 #define I915_GEM_PHYS_CURSOR_0 1
 #define I915_GEM_PHYS_CURSOR_1 2
@@ -221,7 +222,7 @@ struct drm_i915_error_state {
 	struct drm_i915_error_buffer {
 		u32 size;
 		u32 name;
-		u32 seqno;
+		u32 rseqno, wseqno;
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
@@ -248,7 +249,6 @@ struct drm_i915_display_funcs {
 	void (*update_wm)(struct drm_device *dev);
 	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
 				 uint32_t sprite_width, int pixel_size);
-	void (*sanitize_pm)(struct drm_device *dev);
 	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
 				   struct drm_display_mode *mode);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
@@ -279,6 +279,32 @@ struct drm_i915_gt_funcs {
 	void (*force_wake_put)(struct drm_i915_private *dev_priv);
 };

+#define DEV_INFO_FLAGS \
+	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
+	DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
+	DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
+	DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_llc)
+
 struct intel_device_info {
 	u8 gen;
 	u8 is_mobile:1;
@@ -695,17 +721,6 @@ typedef struct drm_i915_private {
 	 */
 	struct list_head active_list;

-	/**
-	 * List of objects which are not in the ringbuffer but which
-	 * still have a write_domain which needs to be flushed before
-	 * unbinding.
-	 *
-	 * last_rendering_seqno is 0 while an object is in this list.
-	 *
-	 * A reference is held on the buffer while on this list.
-	 */
-	struct list_head flushing_list;
-
 	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
@@ -796,9 +811,6 @@ typedef struct drm_i915_private {
 	bool lvds_downclock_avail;
 	/* indicates the reduced downclock for LVDS*/
 	int lvds_downclock;
-	struct work_struct idle_work;
-	struct timer_list idle_timer;
-	bool busy;
 	u16 orig_clock;
 	int child_dev_num;
 	struct child_device_config *child_dev;
@@ -807,9 +819,21 @@ typedef struct drm_i915_private {

 	bool mchbar_need_disable;

-	struct work_struct rps_work;
-	spinlock_t rps_lock;
-	u32 pm_iir;
+	/* gen6+ rps state */
+	struct {
+		struct work_struct work;
+		u32 pm_iir;
+		/* lock - irqsave spinlock that protects the work_struct and
+		 * pm_iir. */
+		spinlock_t lock;
+
+		/* The below variables and all the rps hw state are protected
+		 * by dev->struct_mutex. */
+		u8 cur_delay;
+		u8 min_delay;
+		u8 max_delay;
+	} rps;

 	u8 cur_delay;
 	u8 min_delay;
@@ -826,7 +850,6 @@ typedef struct drm_i915_private {
 	int c_m;
 	int r_t;
 	u8 corr;
-	spinlock_t *mchdev_lock;

 	enum no_fbc_reason no_fbc_reason;

@@ -861,9 +884,9 @@ enum hdmi_force_audio {
 };

 enum i915_cache_level {
-	I915_CACHE_NONE,
+	I915_CACHE_NONE = 0,
 	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+ */
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };

 struct drm_i915_gem_object {
@@ -873,18 +896,16 @@ struct drm_i915_gem_object {
 	struct drm_mm_node *gtt_space;
 	struct list_head gtt_list;

-	/** This object's place on the active/flushing/inactive lists */
+	/** This object's place on the active/inactive lists */
 	struct list_head ring_list;
 	struct list_head mm_list;
-	/** This object's place on GPU write list */
-	struct list_head gpu_write_list;
 	/** This object's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;

 	/**
-	 * This is set if the object is on the active or flushing lists
-	 * (has pending rendering), and is not set if it's on inactive (ready
-	 * to be unbound).
+	 * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * inactive (ready to be unbound) list.
 	 */
 	unsigned int active:1;

@@ -894,12 +915,6 @@ struct drm_i915_gem_object {
 	 */
 	unsigned int dirty:1;

-	/**
-	 * This is set if the object has been written to since the last
-	 * GPU flush.
-	 */
-	unsigned int pending_gpu_write:1;
-
 	/**
 	 * Fence register bits (if any) for this object. Will be set
 	 * as needed when mapped into the GTT.
@@ -992,7 +1007,8 @@ struct drm_i915_gem_object {
 	struct intel_ring_buffer *ring;

 	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_rendering_seqno;
+	uint32_t last_read_seqno;
+	uint32_t last_write_seqno;
 	/** Breadcrumb of last fenced GPU access to the buffer. */
 	uint32_t last_fenced_seqno;

@@ -1135,6 +1151,8 @@ struct drm_i915_file_private {

 #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)

+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
 #include "i915_trace.h"

 /**
@@ -1256,6 +1274,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
+int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1274,9 +1296,6 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
-				     uint32_t invalidate_domains,
-				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1291,7 +1310,6 @@ void i915_gem_lastclose(struct drm_device *dev);
 int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 				  gfp_t gfpmask);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
@@ -1358,9 +1376,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct intel_ring_buffer *ring,
-				  struct drm_file *file,
-				  struct drm_i915_gem_request *request);
+int i915_add_request(struct intel_ring_buffer *ring,
+		     struct drm_file *file,
+		     struct drm_i915_gem_request *request);
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
 				 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1429,7 +1447,9 @@ void i915_gem_init_global_gtt(struct drm_device *dev,

 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
-					  unsigned alignment, bool mappable);
+					  unsigned alignment,
+					  unsigned cache_level,
+					  bool mappable);
 int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);

 /* i915_gem_stolen.c */
@@ -1529,6 +1549,8 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);

 extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file);

 /* overlay */
 #ifdef CONFIG_DEBUG_FS
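The new dev_priv->rps sub-struct documents its own locking split: rps.lock is an irqsave spinlock guarding only the work item and pm_iir (written from interrupt context), while the delay fields and all rps hardware state sit under dev->struct_mutex. A sketch of the intended pattern from the interrupt side follows; it is simplified from the shape of the real gen6 PM interrupt code and should be read as illustrative, not a drop-in implementation:

	/* Illustrative sketch of the rps locking rules, interrupt side. */
	static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
					u32 pm_iir)
	{
		unsigned long flags;

		/* rps.lock is irqsave: we may be in hard irq context. */
		spin_lock_irqsave(&dev_priv->rps.lock, flags);
		dev_priv->rps.pm_iir |= pm_iir;
		spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

		/* rps.work later reads pm_iir under the same lock and takes
		 * dev->struct_mutex before touching the rps hw state. */
		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}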
@ -37,7 +37,6 @@
|
||||
#include <linux/pci.h>
|
||||
#include <linux/dma-buf.h>
|
||||
|
||||
static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
|
||||
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
|
||||
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
|
||||
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
|
||||
@ -1441,7 +1440,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
|
||||
list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
|
||||
list_move_tail(&obj->ring_list, &ring->active_list);
|
||||
|
||||
obj->last_rendering_seqno = seqno;
|
||||
obj->last_read_seqno = seqno;
|
||||
|
||||
if (obj->fenced_gpu_access) {
|
||||
obj->last_fenced_seqno = seqno;
|
||||
@ -1457,43 +1456,31 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
list_del_init(&obj->ring_list);
|
||||
obj->last_rendering_seqno = 0;
|
||||
obj->last_fenced_seqno = 0;
|
||||
}
|
||||
|
||||
static void
|
||||
i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
BUG_ON(!obj->active);
|
||||
list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
|
||||
|
||||
i915_gem_object_move_off_active(obj);
|
||||
}
|
||||
|
||||
static void
|
||||
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
|
||||
BUG_ON(!obj->active);
|
||||
|
||||
if (obj->pin_count) /* are we a framebuffer? */
|
||||
intel_mark_fb_idle(obj);
|
||||
|
||||
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
|
||||
|
||||
BUG_ON(!list_empty(&obj->gpu_write_list));
|
||||
BUG_ON(!obj->active);
|
||||
list_del_init(&obj->ring_list);
|
||||
obj->ring = NULL;
|
||||
|
||||
i915_gem_object_move_off_active(obj);
|
||||
obj->last_read_seqno = 0;
|
||||
obj->last_write_seqno = 0;
|
||||
obj->base.write_domain = 0;
|
||||
|
||||
obj->last_fenced_seqno = 0;
|
||||
obj->fenced_gpu_access = false;
|
||||
|
||||
obj->active = 0;
|
||||
obj->pending_gpu_write = false;
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
|
||||
WARN_ON(i915_verify_lists(dev));
|
||||
@ -1525,30 +1512,6 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
|
||||
return obj->madv == I915_MADV_DONTNEED;
|
||||
}
|
||||
|
||||
static void
|
||||
i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
|
||||
uint32_t flush_domains)
|
||||
{
|
||||
struct drm_i915_gem_object *obj, *next;
|
||||
|
||||
list_for_each_entry_safe(obj, next,
|
||||
&ring->gpu_write_list,
|
||||
gpu_write_list) {
|
||||
if (obj->base.write_domain & flush_domains) {
|
||||
uint32_t old_write_domain = obj->base.write_domain;
|
||||
|
||||
obj->base.write_domain = 0;
|
||||
list_del_init(&obj->gpu_write_list);
|
||||
i915_gem_object_move_to_active(obj, ring,
|
||||
i915_gem_next_request_seqno(ring));
|
||||
|
||||
trace_i915_gem_object_change_domain(obj,
|
||||
obj->base.read_domains,
|
||||
old_write_domain);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static u32
|
||||
i915_gem_get_seqno(struct drm_device *dev)
|
||||
{
|
||||
@ -1589,15 +1552,16 @@ i915_add_request(struct intel_ring_buffer *ring,
|
||||
* is that the flush _must_ happen before the next request, no matter
|
||||
* what.
|
||||
*/
|
||||
if (ring->gpu_caches_dirty) {
|
||||
ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = intel_ring_flush_all_caches(ring);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ring->gpu_caches_dirty = false;
|
||||
if (request == NULL) {
|
||||
request = kmalloc(sizeof(*request), GFP_KERNEL);
|
||||
if (request == NULL)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
BUG_ON(request == NULL);
|
||||
seqno = i915_gem_next_request_seqno(ring);
|
||||
|
||||
/* Record the position of the start of the request so that
|
||||
@ -1608,8 +1572,10 @@ i915_add_request(struct intel_ring_buffer *ring,
|
||||
request_ring_position = intel_ring_get_tail(ring);
|
||||
|
||||
ret = ring->add_request(ring, &seqno);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (ret) {
|
||||
kfree(request);
|
||||
return ret;
|
||||
}
|
||||
|
||||
trace_i915_gem_request_add(ring, seqno);
|
||||
|
||||
@ -1619,6 +1585,7 @@ i915_add_request(struct intel_ring_buffer *ring,
|
||||
request->emitted_jiffies = jiffies;
|
||||
was_empty = list_empty(&ring->request_list);
|
||||
list_add_tail(&request->list, &ring->request_list);
|
||||
request->file_priv = NULL;
|
||||
|
||||
if (file) {
|
||||
struct drm_i915_file_private *file_priv = file->driver_priv;
|
||||
@ -1638,13 +1605,13 @@ i915_add_request(struct intel_ring_buffer *ring,
|
||||
jiffies +
|
||||
msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
|
||||
}
|
||||
if (was_empty)
|
||||
if (was_empty) {
|
||||
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
intel_mark_busy(dev_priv->dev);
}
}

WARN_ON(!list_empty(&ring->gpu_write_list));

return 0;
}

@ -1686,8 +1653,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object,
ring_list);

obj->base.write_domain = 0;
list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_inactive(obj);
}
}
@ -1723,20 +1688,6 @@ void i915_gem_reset(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);

/* Remove anything from the flushing lists. The GPU cache is likely
* to be lost on reset along with the data, so simply move the
* lost bo to the inactive list.
*/
while (!list_empty(&dev_priv->mm.flushing_list)) {
obj = list_first_entry(&dev_priv->mm.flushing_list,
struct drm_i915_gem_object,
mm_list);

obj->base.write_domain = 0;
list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_inactive(obj);
}

/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
@ -1765,7 +1716,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)

WARN_ON(i915_verify_lists(ring->dev));

seqno = ring->get_seqno(ring);
seqno = ring->get_seqno(ring, true);

for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
if (seqno >= ring->sync_seqno[i])
@ -1804,13 +1755,10 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
struct drm_i915_gem_object,
ring_list);

if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;

if (obj->base.write_domain != 0)
i915_gem_object_move_to_flushing(obj);
else
i915_gem_object_move_to_inactive(obj);
i915_gem_object_move_to_inactive(obj);
}

if (unlikely(ring->trace_irq_seqno &&
@ -1859,20 +1807,16 @@ i915_gem_retire_work_handler(struct work_struct *work)
*/
idle = true;
for_each_ring(ring, dev_priv, i) {
if (ring->gpu_caches_dirty) {
struct drm_i915_gem_request *request;

request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL ||
i915_add_request(ring, NULL, request))
kfree(request);
}
if (ring->gpu_caches_dirty)
i915_add_request(ring, NULL, NULL);

idle &= list_empty(&ring->request_list);
}

if (!dev_priv->mm.suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
if (idle)
intel_mark_idle(dev);

mutex_unlock(&dev->struct_mutex);
}
@ -1913,25 +1857,13 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv,
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
int ret = 0;
int ret;

BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

if (seqno == ring->outstanding_lazy_request) {
struct drm_i915_gem_request *request;

request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;

ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
}

BUG_ON(seqno != request->seqno);
}
ret = 0;
if (seqno == ring->outstanding_lazy_request)
ret = i915_add_request(ring, NULL, NULL);

return ret;
}
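/*
 * With the flushing list gone, i915_add_request(ring, NULL, NULL) is
 * expected to allocate the request structure itself, so flushing the
 * outstanding lazy request collapses into a single call; the check above
 * keeps the invariant that any seqno we are about to wait on has really
 * been emitted to the ring.
 */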
@ -1956,7 +1888,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
bool wait_forever = true;
int ret;

if (i915_seqno_passed(ring->get_seqno(ring), seqno))
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;

trace_i915_gem_request_wait_begin(ring, seqno);
@ -1975,7 +1907,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
getrawmonotonic(&before);

#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring), seqno) || \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
atomic_read(&dev_priv->mm.wedged))
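/*
 * The new second argument to get_seqno() selects how hard to work for a
 * coherent seqno read: the entry check above passes true to force the
 * gen6+ "missed IRQ" workaround once, while EXIT_COND, evaluated in the
 * wait loop below, can use the cheaper lazy read (false).
 */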
do {
if (interruptible)
@ -2046,26 +1978,37 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
u32 seqno;
int ret;

/* This function only exists to support waiting for existing rendering,
* not for emitting required flushes.
*/
BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);

/* If there is rendering queued on the buffer being evicted, wait for
* it.
*/
if (obj->active) {
ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
i915_gem_retire_requests_ring(obj->ring);
if (readonly)
seqno = obj->last_write_seqno;
else
seqno = obj->last_read_seqno;
if (seqno == 0)
return 0;

ret = i915_wait_seqno(obj->ring, seqno);
if (ret)
return ret;

/* Manually manage the write flush as we may have not yet retired
* the buffer.
*/
if (obj->last_write_seqno &&
i915_seqno_passed(seqno, obj->last_write_seqno)) {
obj->last_write_seqno = 0;
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}

i915_gem_retire_requests_ring(obj->ring);
return 0;
}
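/*
 * Tracking read and write seqnos separately lets a read-only waiter
 * return as soon as the last writer has completed, instead of waiting
 * for every outstanding reader as the old last_rendering_seqno did; the
 * manual write-domain clear above covers the window before the request
 * is actually retired.
 */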

@ -2080,14 +2023,10 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
int ret;

if (obj->active) {
ret = i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
if (ret)
return ret;

ret = i915_gem_check_olr(obj->ring,
obj->last_rendering_seqno);
if (ret)
return ret;
i915_gem_retire_requests_ring(obj->ring);
}

@ -2147,7 +2086,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto out;

if (obj->active) {
seqno = obj->last_rendering_seqno;
seqno = obj->last_read_seqno;
ring = obj->ring;
}

@ -2202,11 +2141,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
return 0;

if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
return i915_gem_object_wait_rendering(obj, false);

idx = intel_ring_sync_index(from, to);

seqno = obj->last_rendering_seqno;
seqno = obj->last_read_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;

@ -2318,42 +2257,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return ret;
}

int
i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
int ret;

if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
return 0;

trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);

ret = ring->flush(ring, invalidate_domains, flush_domains);
if (ret)
return ret;

if (flush_domains & I915_GEM_GPU_DOMAINS)
i915_gem_process_flushing_list(ring, flush_domains);

return 0;
}

static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;

if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
if (list_empty(&ring->active_list))
return 0;

if (!list_empty(&ring->gpu_write_list)) {
ret = i915_gem_flush_ring(ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}

return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}

@ -2369,10 +2277,6 @@ int i915_gpu_idle(struct drm_device *dev)
if (ret)
return ret;

/* Is the device fubar? */
if (WARN_ON(!list_empty(&ring->gpu_write_list)))
return -EBUSY;

ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
if (ret)
return ret;
@ -2548,21 +2452,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
int ret;

if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
if (ret)
return ret;
}

obj->fenced_gpu_access = false;
}

if (obj->last_fenced_seqno) {
ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
if (ret)
return ret;

@ -2575,6 +2466,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
mb();

obj->fenced_gpu_access = false;
return 0;
}

@ -2694,6 +2586,76 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;
}

static bool i915_gem_valid_gtt_space(struct drm_device *dev,
struct drm_mm_node *gtt_space,
unsigned long cache_level)
{
struct drm_mm_node *other;

/* On non-LLC machines we have to be careful when putting differing
* types of snoopable memory together to avoid the prefetcher
* crossing memory domains and dying.
*/
if (HAS_LLC(dev))
return true;

if (gtt_space == NULL)
return true;

if (list_empty(&gtt_space->node_list))
return true;

other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
if (other->allocated && !other->hole_follows && other->color != cache_level)
return false;

other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
return false;

return true;
}
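/*
 * Both neighbours of the node are checked: placement is only valid if
 * any adjacent node of a different cache level is separated by a hole,
 * matching the guard page that the GTT color_adjust callback inserts
 * between differently-coloured ranges.
 */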

static void i915_gem_verify_gtt(struct drm_device *dev)
{
#if WATCH_GTT
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int err = 0;

list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
if (obj->gtt_space == NULL) {
printk(KERN_ERR "object found on GTT list with no space reserved\n");
err++;
continue;
}

if (obj->cache_level != obj->gtt_space->color) {
printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
obj->gtt_space->start,
obj->gtt_space->start + obj->gtt_space->size,
obj->cache_level,
obj->gtt_space->color);
err++;
continue;
}

if (!i915_gem_valid_gtt_space(dev,
obj->gtt_space,
obj->cache_level)) {
printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
obj->gtt_space->start,
obj->gtt_space->start + obj->gtt_space->size,
obj->cache_level);
err++;
continue;
}
}

WARN_ON(err);
#endif
}

/**
* Finds free space in the GTT aperture and binds the object there.
*/
@ -2748,36 +2710,47 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
search_free:
if (map_and_fenceable)
free_space =
drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
size, alignment,
0, dev_priv->mm.gtt_mappable_end,
0);
drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
false);
else
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
size, alignment, 0);
free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
size, alignment, obj->cache_level,
false);

if (free_space != NULL) {
if (map_and_fenceable)
obj->gtt_space =
drm_mm_get_block_range_generic(free_space,
size, alignment, 0,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
0);
false);
else
obj->gtt_space =
drm_mm_get_block(free_space, size, alignment);
drm_mm_get_block_generic(free_space,
size, alignment, obj->cache_level,
false);
}
if (obj->gtt_space == NULL) {
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level,
map_and_fenceable);
if (ret)
return ret;

goto search_free;
}
if (WARN_ON(!i915_gem_valid_gtt_space(dev,
obj->gtt_space,
obj->cache_level))) {
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
return -EINVAL;
}

ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
if (ret) {
@ -2840,6 +2813,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->map_and_fenceable = mappable && fenceable;

trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
}

@ -2869,17 +2843,6 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
return 0;

/* Queue the GPU write cache flushing we need. */
return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
@ -2946,16 +2909,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
return 0;

ret = i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;

if (obj->pending_gpu_write || write) {
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}

i915_gem_object_flush_cpu_write_domain(obj);

old_write_domain = obj->base.write_domain;
@ -2998,6 +2955,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}

if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}

if (obj->gtt_space) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
@ -3009,7 +2972,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* registers with snooped memory, so relinquish any fences
* currently pointing to our region in the aperture.
*/
if (INTEL_INFO(obj->base.dev)->gen < 6) {
if (INTEL_INFO(dev)->gen < 6) {
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
@ -3020,6 +2983,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);

obj->gtt_space->color = cache_level;
}

if (cache_level == I915_CACHE_NONE) {
@ -3046,9 +3011,72 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}

obj->cache_level = cache_level;
i915_gem_verify_gtt(dev);
return 0;
}

int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_cacheing *args = data;
struct drm_i915_gem_object *obj;
int ret;

ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;

obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}

args->cacheing = obj->cache_level != I915_CACHE_NONE;

drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}

int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_cacheing *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
int ret;

ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;

switch (args->cacheing) {
case I915_CACHEING_NONE:
level = I915_CACHE_NONE;
break;
case I915_CACHEING_CACHED:
level = I915_CACHE_LLC;
break;
default:
return -EINVAL;
}

obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}

ret = i915_gem_object_set_cache_level(obj, level);

drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
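/*
 * Userspace sketch (illustrative only -- the exact ioctl number macro is
 * assumed here, not taken from this patch):
 *
 *	struct drm_i915_gem_cacheing arg = {
 *		.handle = bo_handle,
 *		.cacheing = I915_CACHEING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHEING, &arg);
 *
 * I915_CACHEING_CACHED maps to I915_CACHE_LLC, I915_CACHEING_NONE to
 * I915_CACHE_NONE, as in the switch above.
 */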

/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
@ -3062,10 +3090,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 old_read_domains, old_write_domain;
int ret;

ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;

if (pipelined != obj->ring) {
ret = i915_gem_object_sync(obj, pipelined);
if (ret)
@ -3101,7 +3125,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.write_domain = 0;
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

trace_i915_gem_object_change_domain(obj,
@ -3119,13 +3143,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
return 0;

if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
if (ret)
return ret;
}

ret = i915_gem_object_wait_rendering(obj);
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;

@ -3149,16 +3167,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return 0;

ret = i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;

if (write || obj->pending_gpu_write) {
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}

i915_gem_object_flush_gtt_write_domain(obj);

old_write_domain = obj->base.write_domain;
@ -3400,6 +3412,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_flush_active(obj);

args->busy = obj->active;
if (obj->ring) {
BUILD_BUG_ON(I915_NUM_RINGS > 16);
args->busy |= intel_ring_flag(obj->ring) << 16;
}

drm_gem_object_unreference(&obj->base);
unlock:
@ -3517,7 +3533,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED;
/* Avoid an unnecessary call to unbind on the first bind. */
obj->map_and_fenceable = true;
@ -3891,7 +3906,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}

BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
mutex_unlock(&dev->struct_mutex);

@ -3939,7 +3953,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->gpu_write_list);
}

void
@ -3949,7 +3962,6 @@ i915_gem_load(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;

INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
@ -4200,12 +4212,7 @@ static int
i915_gpu_is_active(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int lists_empty;

lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list);

return !lists_empty;
return !list_empty(&dev_priv->mm.active_list);
}

static int

@ -97,8 +97,7 @@

static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
static int do_switch(struct drm_i915_gem_object *from_obj,
struct i915_hw_context *to, u32 seqno);
static int do_switch(struct i915_hw_context *to);

static int get_context_size(struct drm_device *dev)
{
@ -113,7 +112,10 @@ static int get_context_size(struct drm_device *dev)
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
if (IS_HASWELL(dev))
ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
default:
BUG();
@ -220,19 +222,20 @@ static int create_default_context(struct drm_i915_private *dev_priv)
*/
dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
if (ret) {
do_destroy(ctx);
return ret;
}
if (ret)
goto err_destroy;

ret = do_switch(NULL, ctx, 0);
if (ret) {
i915_gem_object_unpin(ctx->obj);
do_destroy(ctx);
} else {
DRM_DEBUG_DRIVER("Default HW context loaded\n");
}
ret = do_switch(ctx);
if (ret)
goto err_unpin;

DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;

err_unpin:
i915_gem_object_unpin(ctx->obj);
err_destroy:
do_destroy(ctx);
return ret;
}
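/*
 * On failure the goto-based unwind releases the pinned default context
 * exactly once: a failed switch drops through err_unpin and err_destroy
 * rather than open-coding the cleanup at each call site.
 */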

@ -359,17 +362,18 @@ mi_set_context(struct intel_ring_buffer *ring,
return ret;
}

static int do_switch(struct drm_i915_gem_object *from_obj,
struct i915_hw_context *to,
u32 seqno)
static int do_switch(struct i915_hw_context *to)
{
struct intel_ring_buffer *ring = NULL;
struct intel_ring_buffer *ring = to->ring;
struct drm_i915_gem_object *from_obj = ring->last_context_obj;
u32 hw_flags = 0;
int ret;

BUG_ON(to == NULL);
BUG_ON(from_obj != NULL && from_obj->pin_count == 0);

if (from_obj == to->obj)
return 0;

ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
if (ret)
return ret;
@ -393,7 +397,6 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;

ring = to->ring;
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
i915_gem_object_unpin(to->obj);
@ -407,6 +410,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring, seqno);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
* swapped, but there is no way to do that yet.
*/
from_obj->dirty = 1;
BUG_ON(from_obj->ring != to->ring);
BUG_ON(from_obj->ring != ring);
i915_gem_object_unpin(from_obj);

drm_gem_object_unreference(&from_obj->base);
@ -448,9 +452,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
int to_id)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_file_private *file_priv = NULL;
struct i915_hw_context *to;
struct drm_i915_gem_object *from_obj = ring->last_context_obj;

if (dev_priv->hw_contexts_disabled)
return 0;
@ -458,21 +460,18 @@ int i915_switch_context(struct intel_ring_buffer *ring,
if (ring != &dev_priv->ring[RCS])
return 0;

if (file)
file_priv = file->driver_priv;

if (to_id == DEFAULT_CONTEXT_ID) {
to = ring->default_context;
} else {
to = i915_gem_context_get(file_priv, to_id);
if (file == NULL)
return -EINVAL;

to = i915_gem_context_get(file->driver_priv, to_id);
if (to == NULL)
return -ENOENT;
}

if (from_obj == to->obj)
return 0;

return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
return do_switch(to);
}
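/*
 * do_switch() now derives the ring and the previous context object from
 * to->ring itself, so callers only name the target context; the default
 * context path above short-circuits the file_priv lookup entirely.
 */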

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,

@ -44,7 +44,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)

int
i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable)
unsigned alignment, unsigned cache_level,
bool mappable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
@ -79,11 +80,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
min_size, alignment, 0,
min_size, alignment, cache_level,
0, dev_priv->mm.gtt_mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space,
min_size, alignment, 0);
min_size, alignment, cache_level);

/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@ -93,23 +94,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,

/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
if (obj->base.write_domain)
continue;

if (mark_free(obj, &unwind_list))
goto found;
}

/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (mark_free(obj, &unwind_list))
goto found;
}
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (!obj->base.write_domain)
continue;

if (mark_free(obj, &unwind_list))
goto found;
}
@ -172,7 +156,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
int ret;

lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return -ENOSPC;
@ -189,8 +172,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)

i915_gem_retire_requests(dev);

BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {

@ -34,180 +34,6 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>

struct change_domains {
uint32_t invalidate_domains;
uint32_t flush_domains;
uint32_t flush_rings;
uint32_t flips;
};

/*
* Set the next domain for the specified object. This
* may not actually perform the necessary flushing/invalidating though,
* as that may want to be batched with other set_domain operations
*
* This is (we hope) the only really tricky part of gem. The goal
* is fairly simple -- track which caches hold bits of the object
* and make sure they remain coherent. A few concrete examples may
* help to explain how it works. For shorthand, we use the notation
* (read_domains, write_domain), e.g. (CPU, CPU) to indicate
* a pair of read and write domain masks.
*
* Case 1: the batch buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Mapped to GTT
* 4. Read by GPU
* 5. Unmapped from GTT
* 6. Freed
*
* Let's take these a step at a time
*
* 1. Allocated
* Pages allocated from the kernel may still have
* cache contents, so we set them to (CPU, CPU) always.
* 2. Written by CPU (using pwrite)
* The pwrite function calls set_domain (CPU, CPU) and
* this function does nothing (as nothing changes)
* 3. Mapped by GTT
* This function asserts that the object is not
* currently in any GPU-based read or write domains
* 4. Read by GPU
* i915_gem_execbuffer calls set_domain (COMMAND, 0).
* As write_domain is zero, this function adds in the
* current read domains (CPU+COMMAND, 0).
* flush_domains is set to CPU.
* invalidate_domains is set to COMMAND
* clflush is run to get data out of the CPU caches
* then i915_dev_set_domain calls i915_gem_flush to
* emit an MI_FLUSH and drm_agp_chipset_flush
* 5. Unmapped from GTT
* i915_gem_object_unbind calls set_domain (CPU, CPU)
* flush_domains and invalidate_domains end up both zero
* so no flushing/invalidating happens
* 6. Freed
* yay, done
*
* Case 2: The shared render buffer
*
* 1. Allocated
* 2. Mapped to GTT
* 3. Read/written by GPU
* 4. set_domain to (CPU,CPU)
* 5. Read/written by CPU
* 6. Read/written by GPU
*
* 1. Allocated
* Same as last example, (CPU, CPU)
* 2. Mapped to GTT
* Nothing changes (assertions find that it is not in the GPU)
* 3. Read/written by GPU
* execbuffer calls set_domain (RENDER, RENDER)
* flush_domains gets CPU
* invalidate_domains gets GPU
* clflush (obj)
* MI_FLUSH and drm_agp_chipset_flush
* 4. set_domain (CPU, CPU)
* flush_domains gets GPU
* invalidate_domains gets CPU
* wait_rendering (obj) to make sure all drawing is complete.
* This will include an MI_FLUSH to get the data from GPU
* to memory
* clflush (obj) to invalidate the CPU cache
* Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
* 5. Read/written by CPU
* cache lines are loaded and dirtied
* 6. Read/written by GPU
* Same as last GPU access
*
* Case 3: The constant buffer
*
* 1. Allocated
* 2. Written by CPU
* 3. Read by GPU
* 4. Updated (written) by CPU again
* 5. Read by GPU
*
* 1. Allocated
* (CPU, CPU)
* 2. Written by CPU
* (CPU, CPU)
* 3. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
* 4. Updated (written) by CPU again
* (CPU, CPU)
* flush_domains = 0 (no previous write domain)
* invalidate_domains = 0 (no new read domains)
* 5. Read by GPU
* (CPU+RENDER, 0)
* flush_domains = CPU
* invalidate_domains = RENDER
* clflush (obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
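/*
 * The net effect of the three cases above: the only per-object state
 * that has to survive into execbuffer is whether the CPU cache holds
 * dirty lines (clflush) and whether the GTT needs a wmb(); the per-ring
 * flush bookkeeping is what the simplified move_to_gpu() now performs
 * inline.
 */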
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
struct change_domains *cd)
{
uint32_t invalidate_domains = 0, flush_domains = 0;

/*
* If the object isn't moving to a new write domain,
* let the object stay in multiple read domains
*/
if (obj->base.pending_write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;

/*
* Flush the current write domain if
* the new read domains don't match. Invalidate
* any read domains which differ from the old
* write domain
*/
if (obj->base.write_domain &&
(((obj->base.write_domain != obj->base.pending_read_domains ||
obj->ring != ring)) ||
(obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
flush_domains |= obj->base.write_domain;
invalidate_domains |=
obj->base.pending_read_domains & ~obj->base.write_domain;
}
/*
* Invalidate any read caches which may have
* stale data. That is, any new read domains.
*/
invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);

if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);

/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
* write_domains). So if we have a current write domain that we
* aren't changing, set pending_write_domain to that.
*/
if (flush_domains == 0 && obj->base.pending_write_domain == 0)
obj->base.pending_write_domain = obj->base.write_domain;

cd->invalidate_domains |= invalidate_domains;
cd->flush_domains |= flush_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(obj->ring);
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= intel_ring_flag(ring);
}

struct eb_objects {
int and;
struct hlist_head buckets[0];
@ -587,6 +413,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,

obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
list_splice(&ordered_objects, objects);

@ -810,18 +637,6 @@ err:
return ret;
}

static void
i915_gem_execbuffer_flush(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();

if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
}

static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
@ -854,48 +669,45 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
return 0;
}


static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
struct change_domains cd;
uint32_t flush_domains = 0;
uint32_t flips = 0;
int ret;

memset(&cd, 0, sizeof(cd));
list_for_each_entry(obj, objects, exec_list)
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);

if (cd.invalidate_domains | cd.flush_domains) {
i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains);
}

if (cd.flips) {
ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
if (ret)
return ret;
}

list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;

if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);

if (obj->base.pending_write_domain)
flips |= atomic_read(&obj->pending_flip);

flush_domains |= obj->base.write_domain;
}

if (flips) {
ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
if (ret)
return ret;
}

if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();

if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();

/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
ret = i915_gem_flush_ring(ring,
I915_GEM_GPU_DOMAINS,
ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
if (ret)
return ret;

ring->gpu_caches_dirty = false;
return 0;
return intel_ring_invalidate_all_caches(ring);
}

static bool
@ -943,9 +755,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct drm_i915_gem_object *obj;

list_for_each_entry(obj, objects, exec_list) {
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;

u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;

obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
@ -954,17 +765,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
obj->last_write_seqno = seqno;
if (obj->pin_count) /* check for potential scanout */
intel_mark_busy(ring->dev, obj);
intel_mark_fb_busy(obj);
}

trace_i915_gem_object_change_domain(obj, old_read, old_write);
}

intel_mark_busy(ring->dev, NULL);
}

static void
@ -972,16 +779,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_request *request;

/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;

/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL || i915_add_request(ring, file, request)) {
kfree(request);
}
(void)i915_add_request(ring, file, NULL);
}

static int

@ -423,6 +423,23 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
undo_idling(dev_priv, interruptible);
}

static void i915_gtt_color_adjust(struct drm_mm_node *node,
unsigned long color,
unsigned long *start,
unsigned long *end)
{
if (node->color != color)
*start += 4096;

if (!list_empty(&node->node_list)) {
node = list_entry(node->node_list.next,
struct drm_mm_node,
node_list);
if (node->allocated && node->color != color)
*end -= 4096;
}
}
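/*
 * The colouring callback keeps a 4096-byte guard page between GTT ranges
 * of differing cache level: a node's start is pushed past a
 * differently-coloured predecessor and its end pulled in ahead of a
 * differently-coloured successor, so the prefetcher never walks across a
 * snoop boundary.
 */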

void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
@ -432,6 +449,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,

/* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end;

@ -296,11 +296,21 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}

static void i915_handle_rps_change(struct drm_device *dev)
/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

static void ironlake_handle_rps_change(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay = dev_priv->cur_delay;
u8 new_delay;
unsigned long flags;

spin_lock_irqsave(&mchdev_lock, flags);

I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

new_delay = dev_priv->cur_delay;

I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
busy_up = I915_READ(RCPREVBSYTUPAVG);
@ -324,6 +334,8 @@ static void i915_handle_rps_change(struct drm_device *dev)
if (ironlake_set_drps(dev, new_delay))
dev_priv->cur_delay = new_delay;

spin_unlock_irqrestore(&mchdev_lock, flags);

return;
}

@ -335,7 +347,7 @@ static void notify_ring(struct drm_device *dev,
if (ring->obj == NULL)
return;

trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
@ -349,16 +361,16 @@ static void notify_ring(struct drm_device *dev,
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps_work);
rps.work);
u32 pm_iir, pm_imr;
u8 new_delay;

spin_lock_irq(&dev_priv->rps_lock);
pm_iir = dev_priv->pm_iir;
dev_priv->pm_iir = 0;
spin_lock_irq(&dev_priv->rps.lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);
spin_unlock_irq(&dev_priv->rps.lock);

if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
@ -366,9 +378,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_lock(&dev_priv->dev->struct_mutex);

if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->cur_delay + 1;
new_delay = dev_priv->rps.cur_delay + 1;
else
new_delay = dev_priv->cur_delay - 1;
new_delay = dev_priv->rps.cur_delay - 1;

gen6_set_rps(dev_priv->dev, new_delay);

@ -444,7 +456,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long flags;

if (!IS_IVYBRIDGE(dev))
if (!HAS_L3_GPU_CACHE(dev))
return;

spin_lock_irqsave(&dev_priv->irq_lock, flags);
@ -488,19 +500,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
* dev_priv->pm_iir. Although missing an interrupt of the same
* dev_priv->rps.pm_iir. Although missing an interrupt of the same
* type is not a problem, it displays a problem in the logic.
*
* The mask bit in IMR is cleared by rps_work.
* The mask bit in IMR is cleared by dev_priv->rps.work.
*/

spin_lock_irqsave(&dev_priv->rps_lock, flags);
dev_priv->pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
spin_lock_irqsave(&dev_priv->rps.lock, flags);
dev_priv->rps.pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

queue_work(dev_priv->wq, &dev_priv->rps_work);
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
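/*
 * All RPS state now lives under dev_priv->rps and is serialised by
 * dev_priv->rps.lock: the interrupt path only accumulates PM IIR bits
 * here and defers the actual frequency change to dev_priv->rps.work.
 */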
|
||||
|
||||
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
|
||||
@ -793,10 +805,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
|
||||
ibx_irq_handler(dev, pch_iir);
|
||||
}
|
||||
|
||||
if (de_iir & DE_PCU_EVENT) {
|
||||
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
|
||||
i915_handle_rps_change(dev);
|
||||
}
|
||||
if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
|
||||
ironlake_handle_rps_change(dev);
|
||||
|
||||
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
|
||||
gen6_queue_rps_work(dev_priv, pm_iir);
|
||||
@ -949,7 +959,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
|
||||
{
|
||||
err->size = obj->base.size;
|
||||
err->name = obj->base.name;
|
||||
err->seqno = obj->last_rendering_seqno;
|
||||
err->rseqno = obj->last_read_seqno;
|
||||
err->wseqno = obj->last_write_seqno;
|
||||
err->gtt_offset = obj->gtt_offset;
|
||||
err->read_domains = obj->base.read_domains;
|
||||
err->write_domain = obj->base.write_domain;
|
||||
@ -1039,12 +1050,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
|
||||
if (!ring->get_seqno)
|
||||
return NULL;
|
||||
|
||||
seqno = ring->get_seqno(ring);
|
||||
seqno = ring->get_seqno(ring, false);
|
||||
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
|
||||
if (obj->ring != ring)
|
||||
continue;
|
||||
|
||||
if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
|
||||
if (i915_seqno_passed(seqno, obj->last_read_seqno))
|
||||
continue;
|
||||
|
||||
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
|
||||
@ -1093,7 +1104,7 @@ static void i915_record_ring_state(struct drm_device *dev,
|
||||
|
||||
error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
|
||||
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
|
||||
error->seqno[ring->id] = ring->get_seqno(ring);
|
||||
error->seqno[ring->id] = ring->get_seqno(ring, false);
|
||||
error->acthd[ring->id] = intel_ring_get_active_head(ring);
|
||||
error->head[ring->id] = I915_READ_HEAD(ring);
|
||||
error->tail[ring->id] = I915_READ_TAIL(ring);
|
||||
@ -1590,7 +1601,8 @@ ring_last_seqno(struct intel_ring_buffer *ring)
|
||||
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
|
||||
{
|
||||
if (list_empty(&ring->request_list) ||
|
||||
i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
|
||||
i915_seqno_passed(ring->get_seqno(ring, false),
|
||||
ring_last_seqno(ring))) {
|
||||
/* Issue a wake-up to catch stuck h/w. */
|
||||
if (waitqueue_active(&ring->irq_queue)) {
|
||||
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
|
||||
@ -2647,7 +2659,7 @@ void intel_irq_init(struct drm_device *dev)
|
||||
|
||||
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
|
||||
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
|
||||
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
|
||||
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
|
||||
INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
|
||||
|
||||
dev->driver->get_vblank_counter = i915_get_vblank_counter;
|
||||
|
@ -449,6 +449,7 @@
|
||||
#define RING_ACTHD(base) ((base)+0x74)
|
||||
#define RING_NOPID(base) ((base)+0x94)
|
||||
#define RING_IMR(base) ((base)+0xa8)
|
||||
#define RING_TIMESTAMP(base) ((base)+0x358)
|
||||
#define TAIL_ADDR 0x001FFFF8
|
||||
#define HEAD_WRAP_COUNT 0xFFE00000
|
||||
#define HEAD_WRAP_ONE 0x00200000
|
||||
@ -528,6 +529,8 @@
|
||||
#define GFX_PSMI_GRANULARITY (1<<10)
|
||||
#define GFX_PPGTT_ENABLE (1<<9)
|
||||
|
||||
#define VLV_DISPLAY_BASE 0x180000
|
||||
|
||||
#define SCPD0 0x0209c /* 915+ only */
|
||||
#define IER 0x020a0
|
||||
#define IIR 0x020a4
|
||||
@ -1495,6 +1498,14 @@
|
||||
GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
|
||||
GEN7_CXT_GT1_SIZE(ctx_reg) + \
|
||||
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
|
||||
#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
|
||||
#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
|
||||
#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
|
||||
#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
|
||||
HSW_CXT_RING_SIZE(ctx_reg) + \
|
||||
HSW_CXT_RENDER_SIZE(ctx_reg) + \
|
||||
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
|
||||
|
||||
|
||||
/*
|
||||
* Overlay regs
|
||||
@ -1548,12 +1559,35 @@
|
||||
|
||||
/* VGA port control */
|
||||
#define ADPA 0x61100
|
||||
#define PCH_ADPA 0xe1100
|
||||
#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
|
||||
|
||||
#define ADPA_DAC_ENABLE (1<<31)
|
||||
#define ADPA_DAC_DISABLE 0
|
||||
#define ADPA_PIPE_SELECT_MASK (1<<30)
|
||||
#define ADPA_PIPE_A_SELECT 0
|
||||
#define ADPA_PIPE_B_SELECT (1<<30)
|
||||
#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
|
||||
/* CPT uses bits 29:30 for pch transcoder select */
|
||||
#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
|
||||
#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
|
||||
#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
|
||||
#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
|
||||
#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
|
||||
#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
|
||||
#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
|
||||
#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
|
||||
#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
|
||||
#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
|
||||
#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
|
||||
#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
|
||||
#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
|
||||
#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
|
||||
#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
|
||||
#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
|
||||
#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
|
||||
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
|
||||
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
|
||||
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
|
||||
#define ADPA_SETS_HVPOLARITY 0
|
||||
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
|
||||
@ -3888,31 +3922,6 @@
|
||||
#define FDI_PLL_CTL_1 0xfe000
|
||||
#define FDI_PLL_CTL_2 0xfe004
|
||||
|
||||
/* CRT */
|
||||
#define PCH_ADPA 0xe1100
|
||||
#define ADPA_TRANS_SELECT_MASK (1<<30)
|
||||
#define ADPA_TRANS_A_SELECT 0
|
||||
#define ADPA_TRANS_B_SELECT (1<<30)
|
||||
#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
|
||||
#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
|
||||
#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
|
||||
#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
|
||||
#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
|
||||
#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
|
||||
#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
|
||||
#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
|
||||
#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
|
||||
#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
|
||||
#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
|
||||
#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
|
||||
#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
|
||||
#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
|
||||
#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
|
||||
#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
|
||||
#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
|
||||
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
|
||||
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
|
||||
|
||||
/* or SDVOB */
|
||||
#define HDMIB 0xe1140
|
||||
#define PORT_ENABLE (1 << 31)
|
||||
@ -4269,194 +4278,184 @@
|
||||
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
|
||||
|
||||
/* HSW Power Wells */
|
||||
#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
|
||||
#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
|
||||
#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
|
||||
#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
|
||||
#define HSW_PWR_WELL_ENABLE (1<<31)
|
||||
#define HSW_PWR_WELL_STATE (1<<30)
|
||||
#define HSW_PWR_WELL_CTL5 0x45410
|
||||
#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
|
||||
#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
|
||||
#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
|
||||
#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
|
||||
#define HSW_PWR_WELL_ENABLE (1<<31)
|
||||
#define HSW_PWR_WELL_STATE (1<<30)
|
||||
#define HSW_PWR_WELL_CTL5 0x45410
|
||||
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
|
||||
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
|
||||
#define HSW_PWR_WELL_FORCE_ON (1<<19)
|
||||
#define HSW_PWR_WELL_CTL6 0x45414
|
||||
#define HSW_PWR_WELL_FORCE_ON (1<<19)
|
||||
#define HSW_PWR_WELL_CTL6 0x45414
|
||||
|
||||
/* Per-pipe DDI Function Control */
|
||||
#define PIPE_DDI_FUNC_CTL_A 0x60400
|
||||
#define PIPE_DDI_FUNC_CTL_B 0x61400
|
||||
#define PIPE_DDI_FUNC_CTL_C 0x62400
|
||||
#define PIPE_DDI_FUNC_CTL_A 0x60400
|
||||
#define PIPE_DDI_FUNC_CTL_B 0x61400
|
||||
#define PIPE_DDI_FUNC_CTL_C 0x62400
|
||||
#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
|
||||
#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
|
||||
PIPE_DDI_FUNC_CTL_A, \
|
||||
PIPE_DDI_FUNC_CTL_B)
|
||||
#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
|
||||
PIPE_DDI_FUNC_CTL_B)
|
||||
#define PIPE_DDI_FUNC_ENABLE (1<<31)
|
||||
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
|
||||
#define PIPE_DDI_PORT_MASK (7<<28)
|
||||
#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
|
||||
#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
|
||||
#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
|
||||
#define PIPE_DDI_PORT_MASK (7<<28)
|
||||
#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
|
||||
#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
|
||||
#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
|
||||
#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
|
||||
#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
|
||||
#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
|
||||
#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
|
||||
#define PIPE_DDI_BPC_8 (0<<20)
|
||||
#define PIPE_DDI_BPC_10 (1<<20)
|
||||
#define PIPE_DDI_BPC_6 (2<<20)
|
||||
#define PIPE_DDI_BPC_12 (3<<20)
|
||||
#define PIPE_DDI_BFI_ENABLE (1<<4)
|
||||
#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
|
||||
#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
|
||||
#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
|
||||
#define PIPE_DDI_MODE_SELECT_FDI	(4<<24)
#define PIPE_DDI_BPC_MASK		(7<<20)
#define PIPE_DDI_BPC_8			(0<<20)
#define PIPE_DDI_BPC_10			(1<<20)
#define PIPE_DDI_BPC_6			(2<<20)
#define PIPE_DDI_BPC_12			(3<<20)
#define PIPE_DDI_PVSYNC			(1<<17)
#define PIPE_DDI_PHSYNC			(1<<16)
#define PIPE_DDI_BFI_ENABLE		(1<<4)
#define PIPE_DDI_PORT_WIDTH_X1		(0<<1)
#define PIPE_DDI_PORT_WIDTH_X2		(1<<1)
#define PIPE_DDI_PORT_WIDTH_X4		(3<<1)

/* DisplayPort Transport Control */
#define DP_TP_CTL_A			0x64040
#define DP_TP_CTL_B			0x64140
#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
#define DP_TP_CTL_ENABLE		(1<<31)
#define DP_TP_CTL_MODE_SST		(0<<27)
#define DP_TP_CTL_MODE_MST		(1<<27)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE	(1<<18)
#define DP_TP_CTL_FDI_AUTOTRAIN		(1<<15)
#define DP_TP_CTL_LINK_TRAIN_MASK	(7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1	(0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2	(1<<8)
#define DP_TP_CTL_LINK_TRAIN_NORMAL	(3<<8)

/* DisplayPort Transport Status */
#define DP_TP_STATUS_A			0x64044
#define DP_TP_STATUS_B			0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
#define DP_TP_STATUS_AUTOTRAIN_DONE	(1<<12)

/* DDI Buffer Control */
#define DDI_BUF_CTL_A			0x64000
#define DDI_BUF_CTL_B			0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE		(1<<31)
#define DDI_BUF_EMP_400MV_0DB_HSW	(0<<24)   /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW	(1<<24)   /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW	(2<<24)   /* Sel2 */
#define DDI_BUF_EMP_400MV_9_5DB_HSW	(3<<24)   /* Sel3 */
#define DDI_BUF_EMP_600MV_0DB_HSW	(4<<24)   /* Sel4 */
#define DDI_BUF_EMP_600MV_3_5DB_HSW	(5<<24)   /* Sel5 */
#define DDI_BUF_EMP_600MV_6DB_HSW	(6<<24)   /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW	(7<<24)   /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW	(8<<24)   /* Sel8 */
#define DDI_BUF_EMP_MASK		(0xf<<24)
#define DDI_BUF_IS_IDLE			(1<<7)
#define DDI_PORT_WIDTH_X1		(0<<1)
#define DDI_PORT_WIDTH_X2		(1<<1)
#define DDI_PORT_WIDTH_X4		(3<<1)
#define DDI_INIT_DISPLAY_DETECTED	(1<<0)

/* DDI Buffer Translations */
#define DDI_BUF_TRANS_A			0x64E00
#define DDI_BUF_TRANS_B			0x64E60
#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)

/* Sideband Interface (SBI) is programmed indirectly, via
 * SBI_ADDR, which contains the register offset; and SBI_DATA,
 * which contains the payload */
#define SBI_ADDR			0xC6000
#define SBI_DATA			0xC6004
#define SBI_CTL_STAT			0xC6008
#define SBI_CTL_OP_CRRD			(0x6<<8)
#define SBI_CTL_OP_CRWR			(0x7<<8)
#define SBI_RESPONSE_FAIL		(0x1<<1)
#define SBI_RESPONSE_SUCCESS		(0x0<<1)
#define SBI_BUSY			(0x1<<0)
#define SBI_READY			(0x0<<0)

/* SBI offsets */
#define SBI_SSCDIVINTPHASE6		0x0600
#define SBI_SSCDIVINTPHASE_DIVSEL_MASK	((0x7f)<<1)
#define SBI_SSCDIVINTPHASE_DIVSEL(x)	((x)<<1)
#define SBI_SSCDIVINTPHASE_INCVAL_MASK	((0x7f)<<8)
#define SBI_SSCDIVINTPHASE_INCVAL(x)	((x)<<8)
#define SBI_SSCDIVINTPHASE_DIR(x)	((x)<<15)
#define SBI_SSCDIVINTPHASE_PROPAGATE	(1<<0)
#define SBI_SSCCTL			0x020c
#define SBI_SSCCTL6			0x060C
#define SBI_SSCCTL_DISABLE		(1<<0)
#define SBI_SSCAUXDIV6			0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x)	((x)<<4)
#define SBI_DBUFF0			0x2a00
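
/*
 * Illustrative sketch only, not part of this commit: a write through the
 * indirect SBI interface would look roughly like this, using the
 * definitions above together with the driver's wait_for() helper. The
 * function name and the (reg << 16) addressing are assumptions.
 */
#if 0
static void intel_sbi_write_sketch(struct drm_i915_private *dev_priv,
				   u16 reg, u32 value)
{
	/* wait for any previous transaction to complete */
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return;
	}

	/* register offset goes into SBI_ADDR, the payload into SBI_DATA */
	I915_WRITE(SBI_ADDR, (reg << 16));
	I915_WRITE(SBI_DATA, value);

	/* kick off the write and poll until the controller is done */
	I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRWR | SBI_BUSY);
	if (wait_for((I915_READ(SBI_CTL_STAT) &
		      (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 100))
		DRM_ERROR("SBI write to 0x%04x failed\n", reg);
}
#endif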

/* LPT PIXCLK_GATE */
#define PIXCLK_GATE			0xC6020
#define PIXCLK_GATE_UNGATE		(1<<0)
#define PIXCLK_GATE_GATE		(0<<0)

/* SPLL */
#define SPLL_CTL			0x46020
#define SPLL_PLL_ENABLE			(1<<31)
#define SPLL_PLL_SCC			(1<<28)
#define SPLL_PLL_NON_SCC		(2<<28)
#define SPLL_PLL_FREQ_810MHz		(0<<26)
#define SPLL_PLL_FREQ_1350MHz		(1<<26)

/* WRPLL */
#define WRPLL_CTL1			0x46040
#define WRPLL_CTL2			0x46060
#define WRPLL_PLL_ENABLE		(1<<31)
#define WRPLL_PLL_SELECT_SSC		(0x01<<28)
#define WRPLL_PLL_SELECT_NON_SCC	(0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700	(0x03<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x)	((x)<<0)
#define WRPLL_DIVIDER_POST(x)		((x)<<8)
#define WRPLL_DIVIDER_FEEDBACK(x)	((x)<<16)

/* Port clock selection */
#define PORT_CLK_SEL_A			0x46100
#define PORT_CLK_SEL_B			0x46104
#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
#define PORT_CLK_SEL_LCPLL_2700		(0<<29)
#define PORT_CLK_SEL_LCPLL_1350		(1<<29)
#define PORT_CLK_SEL_LCPLL_810		(2<<29)
#define PORT_CLK_SEL_SPLL		(3<<29)
#define PORT_CLK_SEL_WRPLL1		(4<<29)
#define PORT_CLK_SEL_WRPLL2		(5<<29)

/* Pipe clock selection */
#define PIPE_CLK_SEL_A			0x46140
#define PIPE_CLK_SEL_B			0x46144
#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
/* For each pipe, we need to select the corresponding port clock */
#define PIPE_CLK_SEL_DISABLED		(0x0<<29)
#define PIPE_CLK_SEL_PORT(x)		((x+1)<<29)
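/* Worked example (sketch, assuming the standard enum port numbering where
 * PORT_A == 0): a pipe driven from port B is programmed with
 * PIPE_CLK_SEL_PORT(PORT_B) == (1+1)<<29 == 2<<29, which keeps the value
 * 0<<29 free to mean "clock disabled". */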

/* LCPLL Control */
#define LCPLL_CTL			0x130040
#define LCPLL_PLL_DISABLE		(1<<31)
#define LCPLL_PLL_LOCK			(1<<30)
#define LCPLL_CD_CLOCK_DISABLE		(1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE	(1<<23)

/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A		0x45270
#define PIPE_WM_LINETIME_B		0x45274
#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
				     PIPE_WM_LINETIME_B)
#define PIPE_WM_LINETIME_MASK		(0x1ff)
#define PIPE_WM_LINETIME_TIME(x)	((x))
#define PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff<<16)
#define PIPE_WM_LINETIME_IPS_LINETIME(x)	((x)<<16)

/* SFUSE_STRAP */
#define SFUSE_STRAP			0xc2014
#define SFUSE_STRAP_DDIB_DETECTED	(1<<2)
#define SFUSE_STRAP_DDIC_DETECTED	(1<<1)
#define SFUSE_STRAP_DDID_DETECTED	(1<<0)

@ -213,7 +213,7 @@ void i915_setup_sysfs(struct drm_device *dev)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}

	if (HAS_L3_GPU_CACHE(dev)) {
		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

@ -47,6 +47,7 @@
struct intel_crt {
	struct intel_encoder base;
	bool force_hotplug_required;
	u32 adpa_reg;
};

static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@ -55,6 +56,11 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
			    struct intel_crt, base);
}

static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
	return container_of(encoder, struct intel_crt, base);
}

static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
@ -145,19 +151,15 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,

	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crt *crt =
		intel_encoder_to_crt(to_intel_encoder(encoder));
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dpll_md_reg;
	u32 adpa, dpll_md;

	dpll_md_reg = DPLL_MD(intel_crtc->pipe);

	/*
	 * Disable separate mode multiplier used when cloning SDVO to CRT
	 * XXX this needs to be adjusted when we really are cloning
@ -185,7 +187,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
	if (!HAS_PCH_SPLIT(dev))
		I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);

	I915_WRITE(crt->adpa_reg, adpa);
}

static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@ -658,9 +660,7 @@ void intel_crt_init(struct drm_device *dev)
	intel_connector_attach_encoder(intel_connector, &crt->base);

	crt->base.type = INTEL_OUTPUT_ANALOG;
	crt->base.cloneable = true;
	if (IS_HASWELL(dev))
		crt->base.crtc_mask = (1 << 0);
	else
@ -677,6 +677,13 @@ void intel_crt_init(struct drm_device *dev)
	else
		encoder_helper_funcs = &gmch_encoder_funcs;

	if (HAS_PCH_SPLIT(dev))
		crt->adpa_reg = PCH_ADPA;
	else if (IS_VALLEYVIEW(dev))
		crt->adpa_reg = VLV_ADPA;
	else
		crt->adpa_reg = ADPA;

	drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);

@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
	case PORT_B:
	case PORT_C:
	case PORT_D:
		intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
		break;
	default:
		DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
@ -267,7 +268,8 @@ struct wrpll_tmds_clock {
	u16 r2;		/* Reference divider */
};

/* Table of matching values for WRPLL clocks programming for each frequency.
 * The code assumes this table is sorted. */
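/* Sketch, not part of the original patch: with the WRPLL fed from the
 * 2700 MHz LCPLL, the entries below appear to satisfy approximately
 * clock [kHz] = 2700000 * n2 / (5 * p * r2); e.g. {20000, 48, 32, 18}
 * gives 2700000 * 32 / (5 * 48 * 18) = 20000. The factor of 5 is
 * inferred from the table itself, not taken from documentation. */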
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
	{19750, 38, 25, 18},
	{20000, 48, 32, 18},
@ -277,7 +278,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
	{23000, 36, 23, 15},
	{23500, 40, 40, 23},
	{23750, 26, 16, 14},
	{24000, 36, 24, 15},
	{25000, 36, 25, 15},
	{25175, 26, 40, 33},
@ -437,7 +437,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
	{108000, 8, 24, 15},
	{108108, 8, 173, 108},
	{109000, 6, 23, 19},
	{110000, 6, 22, 18},
	{110013, 6, 22, 18},
	{110250, 8, 49, 30},
@ -614,7 +613,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
	{218250, 4, 42, 26},
	{218750, 4, 34, 21},
	{219000, 4, 47, 29},
	{220000, 4, 44, 27},
	{220640, 4, 49, 30},
	{220750, 4, 36, 22},
@ -658,7 +656,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
	int port = intel_hdmi->ddi_port;
	int pipe = intel_crtc->pipe;
	int p, n2, r2;
	u32 temp, i;

	/* On Haswell, we need to enable the clocks and prepare DDI function to
@ -666,26 +664,23 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
	 */
	DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));

	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
		if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
			break;

	if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
		i--;

	p = wrpll_tmds_clock_table[i].p;
	n2 = wrpll_tmds_clock_table[i].n2;
	r2 = wrpll_tmds_clock_table[i].r2;

	if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
		DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
			 wrpll_tmds_clock_table[i].clock, crtc->mode.clock);

	DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
		      crtc->mode.clock, p, n2, r2);

	/* Enable LCPLL if disabled */
	temp = I915_READ(LCPLL_CTL);
@ -723,15 +718,35 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
	}

	/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
	temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);

	switch (intel_crtc->bpp) {
	case 18:
		temp |= PIPE_DDI_BPC_6;
		break;
	case 24:
		temp |= PIPE_DDI_BPC_8;
		break;
	case 30:
		temp |= PIPE_DDI_BPC_10;
		break;
	case 36:
		temp |= PIPE_DDI_BPC_12;
		break;
	default:
		WARN(1, "%d bpp unsupported by pipe DDI function\n",
		     intel_crtc->bpp);
	}

	if (intel_hdmi->has_hdmi_sink)
		temp |= PIPE_DDI_MODE_SELECT_HDMI;
	else
		temp |= PIPE_DDI_MODE_SELECT_DVI;

	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
		temp |= PIPE_DDI_PVSYNC;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
		temp |= PIPE_DDI_PHSYNC;

	I915_WRITE(DDI_FUNC_CTL(pipe), temp);

@ -1429,8 +1429,10 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
 * protect mechanism may be enabled.
 *
 * Note! This is for pre-ILK only.
 *
 * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
 */
void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;
@ -2836,13 +2838,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;

	/*
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
	 * must be driven by its own crtc; no sharing is possible.
	 */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {

		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
		 * CPU handles all others */
@ -2850,19 +2852,19 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
			/* It is still unclear how this will work on PPT, so throw up a warning */
			WARN_ON(!HAS_PCH_LPT(dev));

			if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
				DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
				return true;
			} else {
				DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
					      intel_encoder->type);
				return false;
			}
		}

		switch (intel_encoder->type) {
		case INTEL_OUTPUT_EDP:
			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
				return false;
			continue;
		}
@ -5848,46 +5850,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
	return mode;
}

static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
@ -5917,10 +5879,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}
}

static void intel_decrease_pllclock(struct drm_crtc *crtc)
@ -5959,89 +5917,46 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)

}

void intel_mark_busy(struct drm_device *dev)
{
	i915_update_gfx_val(dev->dev_private);
}

void intel_mark_idle(struct drm_device *dev)
{
}

void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_crtc *crtc;

	if (!i915_powersave)
		return;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->fb)
			continue;

		if (to_intel_framebuffer(crtc->fb)->obj == obj)
			intel_increase_pllclock(crtc);
	}
}

void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_crtc *crtc;

	if (!i915_powersave)
		return;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->fb)
			continue;

		if (to_intel_framebuffer(crtc->fb)->obj == obj)
			intel_decrease_pllclock(crtc);
	}
}

@ -6392,7 +6307,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		ret = -ENODEV;
		goto err_unpin;
	}

	ret = intel_ring_begin(ring, 4);
@ -6500,7 +6415,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
		goto cleanup_pending;

	intel_disable_fbc(dev);
	intel_mark_fb_busy(obj);
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);
@ -6666,11 +6581,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
}

int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@ -6697,15 +6607,23 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
	return 0;
}

static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	list_for_each_entry(source_encoder,
			    &dev->mode_config.encoder_list, base.head) {

		if (encoder == source_encoder)
			index_mask |= (1 << entry);

		/* Intel hw has only one MUX where encoders could be cloned. */
		if (encoder->cloneable && source_encoder->cloneable)
			index_mask |= (1 << entry);

		entry++;
	}

@ -6746,10 +6664,10 @@ static void intel_setup_outputs(struct drm_device *dev)
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	}

	intel_crt_init(dev);
@ -6780,22 +6698,22 @@ static void intel_setup_outputs(struct drm_device *dev)
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		int found;

@ -6803,17 +6721,17 @@ static void intel_setup_outputs(struct drm_device *dev)
			/* SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, SDVOB, PORT_B);
			if (!found && (I915_READ(DP_B) & DP_DETECTED))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		if (I915_READ(SDVOC) & PORT_DETECTED)
			intel_hdmi_init(dev, SDVOC, PORT_C);

		/* Shares lanes with HDMI on SDVOC */
		if (I915_READ(DP_C) & DP_DETECTED)
			intel_dp_init(dev, DP_C, PORT_C);
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

@ -6822,12 +6740,12 @@ static void intel_setup_outputs(struct drm_device *dev)
		found = intel_sdvo_init(dev, SDVOB, true);
		if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
			DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
			intel_hdmi_init(dev, SDVOB, PORT_B);
		}

		if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
			DRM_DEBUG_KMS("probing DP_B\n");
			intel_dp_init(dev, DP_B, PORT_B);
		}
	}

@ -6842,18 +6760,18 @@ static void intel_setup_outputs(struct drm_device *dev)

		if (SUPPORTS_INTEGRATED_HDMI(dev)) {
			DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
			intel_hdmi_init(dev, SDVOC, PORT_C);
		}
		if (SUPPORTS_INTEGRATED_DP(dev)) {
			DRM_DEBUG_KMS("probing DP_C\n");
			intel_dp_init(dev, DP_C, PORT_C);
		}
	}

	if (SUPPORTS_INTEGRATED_DP(dev) &&
	    (I915_READ(DP_D) & DP_DETECTED)) {
		DRM_DEBUG_KMS("probing DP_D\n");
		intel_dp_init(dev, DP_D, PORT_D);
	}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);
@ -6864,7 +6782,7 @@ static void intel_setup_outputs(struct drm_device *dev)
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
@ -7229,10 +7147,6 @@ void intel_modeset_init(struct drm_device *dev)
	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);
}

void intel_modeset_gem_init(struct drm_device *dev)
@ -7278,19 +7192,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
	 * enqueue unpin/hotplug work. */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);
	cancel_work_sync(&dev_priv->rps.work);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);
}

@ -36,42 +36,10 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_dp_helper.h"

#define DP_LINK_STATUS_SIZE	6
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
@ -1668,6 +1636,45 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}
	} else {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

@ -1675,12 +1682,15 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
				  DP_TRAINING_PATTERN_SET,
				  dp_train_pat);

	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}
@ -1696,7 +1706,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	/*
@ -1717,10 +1726,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)

	DP |= DP_PORT_EN;

	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
@ -1744,12 +1749,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}

		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;
@ -1804,10 +1804,8 @@ static void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
@ -1836,13 +1834,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;
@ -1877,15 +1870,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
		++tries;
	}

	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}

static void
@ -2441,7 +2426,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
}

void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
@ -2456,6 +2441,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
		return;

	intel_dp->output_reg = output_reg;
	intel_dp->port = port;
	intel_dp->dpms_mode = -1;

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
@ -2483,18 +2469,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)

	connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_encoder->cloneable = false;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

@ -2509,28 +2487,25 @@ intel_dp_init(struct drm_device *dev, int output_reg)
	drm_sysfs_connector_add(connector);

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		name = "DPDDC-A";
		break;
	case PORT_B:
		dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
		name = "DPDDC-B";
		break;
	case PORT_C:
		dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
		name = "DPDDC-C";
		break;
	case PORT_D:
		dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
		name = "DPDDC-D";
		break;
	default:
		WARN(1, "Invalid port %c\n", port_name(port));
		break;
	}

	intel_dp_i2c_init(intel_dp, intel_connector, name);

@ -31,6 +31,7 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "drm_dp_helper.h"

#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
@ -90,25 +91,6 @@
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8

#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
@ -153,9 +135,13 @@ struct intel_encoder {
	struct drm_encoder base;
	int type;
	bool needs_tv_clock;
	/*
	 * Intel hw has only one MUX where encoders could be cloned, hence a
	 * simple flag is enough to compute the possible_clones mask.
	 */
	bool cloneable;
	void (*hot_plug)(struct intel_encoder *);
	int crtc_mask;
};

struct intel_connector {
@ -171,8 +157,6 @@ struct intel_crtc {
	int dpms_mode;
	bool active; /* is the crtc on? independent of the dpms mode */
	bool primary_disabled; /* is the crtc obscured by a plane? */
	bool lowfreq_avail;
	struct intel_overlay *overlay;
	struct intel_unpin_work *unpin_work;
@ -311,6 +295,38 @@ struct intel_hdmi {
			struct drm_display_mode *adjusted_mode);
};

#define DP_RECEIVER_CAP_SIZE		0xf
#define DP_LINK_CONFIGURATION_SIZE	9

struct intel_dp {
	struct intel_encoder base;
	uint32_t output_reg;
	uint32_t DP;
	uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
	bool has_audio;
	enum hdmi_force_audio force_audio;
	enum port port;
	uint32_t color_range;
	int dpms_mode;
	uint8_t link_bw;
	uint8_t lane_count;
	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
	struct i2c_adapter adapter;
	struct i2c_algo_dp_aux_data algo;
	bool is_pch_edp;
	uint8_t train_set[4];
	int panel_power_up_delay;
	int panel_power_down_delay;
	int panel_power_cycle_delay;
	int backlight_on_delay;
	int backlight_off_delay;
	struct drm_display_mode *panel_fixed_mode; /* for eDP */
	struct delayed_work panel_vdd_work;
	bool want_panel_vdd;
	struct edid *edid; /* cached EDID for eDP */
	int edid_mode_count;
};

static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@ -348,17 +364,21 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);

extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
			    int sdvox_reg, enum port port);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
			    bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev);
extern void intel_mark_idle(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
			  enum port port);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode);
@ -371,8 +391,6 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
				      enum plane plane);

/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
				   struct drm_display_mode *adjusted_mode);

@ -37,6 +37,7 @@
#define SIL164_ADDR	0x38
#define CH7xxx_ADDR	0x76
#define TFP410_ADDR	0x38
#define NS2501_ADDR	0x38

static const struct intel_dvo_device intel_dvo_devices[] = {
	{
@ -74,7 +75,14 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
		.slave_addr = 0x75,
		.gpio = GMBUS_PORT_DPB,
		.dev_ops = &ch7017_ops,
	},
	{
		.type = INTEL_DVO_CHIP_TMDS,
		.name = "ns2501",
		.dvo_reg = DVOC,
		.slave_addr = NS2501_ADDR,
		.dev_ops = &ns2501_ops,
	}
};

struct intel_dvo {
@ -396,17 +404,14 @@ void intel_dvo_init(struct drm_device *dev)
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	switch (dvo->type) {
	case INTEL_DVO_CHIP_TMDS:
		intel_encoder->cloneable = true;
		drm_connector_init(dev, connector,
				   &intel_dvo_connector_funcs,
				   DRM_MODE_CONNECTOR_DVII);
		encoder_type = DRM_MODE_ENCODER_TMDS;
		break;
	case INTEL_DVO_CHIP_LVDS:
		intel_encoder->cloneable = false;
		drm_connector_init(dev, connector,
				   &intel_dvo_connector_funcs,
				   DRM_MODE_CONNECTOR_LVDS);

@ -889,7 +889,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
	intel_attach_broadcast_rgb_property(connector);
}

void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
@ -923,48 +923,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
	connector->doublescan_allowed = 0;
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);

	intel_encoder->cloneable = false;

	intel_hdmi->ddi_port = port;
	switch (port) {
	case PORT_B:
		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
		break;
	case PORT_C:
		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
		break;
	case PORT_D:
		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
		break;
	case PORT_A:
		/* Internal port only for eDP. */
	default:
		BUG();
	}

@ -967,7 +967,7 @@ bool intel_lvds_init(struct drm_device *dev)
	intel_connector_attach_encoder(intel_connector, intel_encoder);
	intel_encoder->type = INTEL_OUTPUT_LVDS;

	intel_encoder->cloneable = false;
	if (HAS_PCH_SPLIT(dev))
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	else if (IS_GEN4(dev))

@ -2160,11 +2160,28 @@ err_unref:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* Lock protecting IPS related data structures
|
||||
* - i915_mch_dev
|
||||
* - dev_priv->max_delay
|
||||
* - dev_priv->min_delay
|
||||
* - dev_priv->fmax
|
||||
* - dev_priv->gpu_busy
|
||||
* - dev_priv->gfx_power
|
||||
*/
|
||||
DEFINE_SPINLOCK(mchdev_lock);
|
||||
|
||||
/* Global for IPS driver to get at the current i915 device. Protected by
|
||||
* mchdev_lock. */
|
||||
static struct drm_i915_private *i915_mch_dev;
|
||||
|
||||
bool ironlake_set_drps(struct drm_device *dev, u8 val)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u16 rgvswctl;
|
||||
|
||||
assert_spin_locked(&mchdev_lock);
|
||||
|
||||
rgvswctl = I915_READ16(MEMSWCTL);
|
||||
if (rgvswctl & MEMCTL_CMD_STS) {
|
||||
DRM_DEBUG("gpu busy, RCS change rejected\n");
|
||||
@ -2188,6 +2205,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
|
||||
u32 rgvmodectl = I915_READ(MEMMODECTL);
|
||||
u8 fmax, fmin, fstart, vstart;
|
||||
|
||||
spin_lock_irq(&mchdev_lock);
|
||||
|
||||
/* Enable temp reporting */
|
||||
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
|
||||
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
|
||||
@ -2233,9 +2252,9 @@ static void ironlake_enable_drps(struct drm_device *dev)
|
||||
rgvmodectl |= MEMMODE_SWMODE_EN;
|
||||
I915_WRITE(MEMMODECTL, rgvmodectl);
|
||||
|
||||
if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
|
||||
if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
|
||||
DRM_ERROR("stuck trying to change perf mode\n");
|
||||
msleep(1);
|
||||
mdelay(1);
|
||||
|
||||
ironlake_set_drps(dev, fstart);
|
||||
|
||||
@ -2244,12 +2263,18 @@ static void ironlake_enable_drps(struct drm_device *dev)
|
||||
dev_priv->last_time1 = jiffies_to_msecs(jiffies);
|
||||
dev_priv->last_count2 = I915_READ(0x112f4);
|
||||
getrawmonotonic(&dev_priv->last_time2);
|
||||
|
||||
spin_unlock_irq(&mchdev_lock);
|
||||
}
|
||||
|
||||
static void ironlake_disable_drps(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u16 rgvswctl = I915_READ16(MEMSWCTL);
|
||||
u16 rgvswctl;
|
||||
|
||||
spin_lock_irq(&mchdev_lock);
|
||||
|
||||
rgvswctl = I915_READ16(MEMSWCTL);
|
||||
|
||||
/* Ack interrupts, disable EFC interrupt */
|
||||
I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
|
||||
@ -2260,30 +2285,51 @@ static void ironlake_disable_drps(struct drm_device *dev)
|
||||
|
||||
/* Go back to the starting frequency */
|
||||
ironlake_set_drps(dev, dev_priv->fstart);
|
||||
msleep(1);
|
||||
mdelay(1);
|
||||
rgvswctl |= MEMCTL_CMD_STS;
|
||||
I915_WRITE(MEMSWCTL, rgvswctl);
|
||||
msleep(1);
|
||||
mdelay(1);
|
||||
|
||||
spin_unlock_irq(&mchdev_lock);
|
||||
}
|
||||
|
||||
/* There's a funny hw issue where the hw returns all 0 when reading from
|
||||
* GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
|
||||
* ourselves, instead of doing a rmw cycle (which might result in us clearing
|
||||
* all limits and the gpu stuck at whatever frequency it is at atm).
|
||||
*/
|
||||
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
|
||||
{
|
||||
u32 limits;
|
||||
|
||||
limits = 0;
|
||||
|
||||
if (*val >= dev_priv->rps.max_delay)
|
||||
*val = dev_priv->rps.max_delay;
|
||||
limits |= dev_priv->rps.max_delay << 24;
|
||||
|
||||
/* Only set the down limit when we've reached the lowest level to avoid
|
||||
* getting more interrupts, otherwise leave this clear. This prevents a
|
||||
* race in the hw when coming out of rc6: There's a tiny window where
|
||||
* the hw runs at the minimal clock before selecting the desired
|
||||
* frequency, if the down threshold expires in that window we will not
|
||||
* receive a down interrupt. */
|
||||
if (*val <= dev_priv->rps.min_delay) {
|
||||
*val = dev_priv->rps.min_delay;
|
||||
limits |= dev_priv->rps.min_delay << 16;
|
||||
}
|
||||
|
||||
return limits;
|
||||
}
|
||||
|
||||
void gen6_set_rps(struct drm_device *dev, u8 val)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 limits;
|
||||
u32 limits = gen6_rps_limits(dev_priv, &val);
|
||||
|
||||
limits = 0;
|
||||
if (val >= dev_priv->max_delay)
|
||||
val = dev_priv->max_delay;
|
||||
else
|
||||
limits |= dev_priv->max_delay << 24;
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
|
||||
if (val <= dev_priv->min_delay)
|
||||
val = dev_priv->min_delay;
|
||||
else
|
||||
limits |= dev_priv->min_delay << 16;
|
||||
|
||||
if (val == dev_priv->cur_delay)
|
||||
if (val == dev_priv->rps.cur_delay)
|
||||
return;
|
||||
|
||||
I915_WRITE(GEN6_RPNSWREQ,
|
||||
@ -2296,7 +2342,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
|
||||
*/
|
||||
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
|
||||
|
||||
dev_priv->cur_delay = val;
|
||||
dev_priv->rps.cur_delay = val;
|
||||
}
|
||||
|
||||
static void gen6_disable_rps(struct drm_device *dev)
|
||||
@ -2312,40 +2358,35 @@ static void gen6_disable_rps(struct drm_device *dev)
|
||||
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
|
||||
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
|
||||
|
||||
spin_lock_irq(&dev_priv->rps_lock);
|
||||
dev_priv->pm_iir = 0;
|
||||
spin_unlock_irq(&dev_priv->rps_lock);
|
||||
spin_lock_irq(&dev_priv->rps.lock);
|
||||
dev_priv->rps.pm_iir = 0;
|
||||
spin_unlock_irq(&dev_priv->rps.lock);
|
||||
|
||||
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
|
||||
}
|
||||
|
||||
int intel_enable_rc6(const struct drm_device *dev)
|
||||
{
|
||||
/*
|
||||
* Respect the kernel parameter if it is set
|
||||
*/
|
||||
/* Respect the kernel parameter if it is set */
|
||||
if (i915_enable_rc6 >= 0)
|
||||
return i915_enable_rc6;
|
||||
|
||||
/*
|
||||
* Disable RC6 on Ironlake
|
||||
*/
|
||||
if (INTEL_INFO(dev)->gen == 5)
|
||||
return 0;
|
||||
|
||||
/* On Haswell, only RC6 is available. So let's enable it by default to
|
||||
* provide better testing and coverage since the beginning.
|
||||
*/
|
||||
if (IS_HASWELL(dev))
|
||||
if (INTEL_INFO(dev)->gen == 5) {
|
||||
DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
|
||||
return INTEL_RC6_ENABLE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Disable rc6 on Sandybridge
|
||||
*/
|
||||
if (IS_HASWELL(dev)) {
|
||||
DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
|
||||
return INTEL_RC6_ENABLE;
|
||||
}
|
||||
|
||||
/* snb/ivb have more than one rc6 state. */
|
||||
if (INTEL_INFO(dev)->gen == 6) {
|
||||
DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
|
||||
return INTEL_RC6_ENABLE;
|
||||
}
|
||||
|
||||
DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
|
||||
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
|
||||
}
|
||||
@ -2383,9 +2424,9 @@ static void gen6_enable_rps(struct drm_device *dev)
|
||||
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
|
||||
|
||||
/* In units of 100MHz */
|
||||
dev_priv->max_delay = rp_state_cap & 0xff;
|
||||
dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
|
||||
dev_priv->cur_delay = 0;
|
||||
dev_priv->rps.max_delay = rp_state_cap & 0xff;
|
||||
dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
|
||||
dev_priv->rps.cur_delay = 0;
|
||||
|
||||
/* disable the counters and set deterministic thresholds */
|
||||
I915_WRITE(GEN6_RC_CONTROL, 0);
|
||||
@ -2438,8 +2479,8 @@ static void gen6_enable_rps(struct drm_device *dev)
|
||||
|
||||
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
|
||||
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
|
||||
dev_priv->max_delay << 24 |
|
||||
dev_priv->min_delay << 16);
|
||||
dev_priv->rps.max_delay << 24 |
|
||||
dev_priv->rps.min_delay << 16);
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
|
||||
@ -2484,7 +2525,7 @@ static void gen6_enable_rps(struct drm_device *dev)
|
||||
500))
|
||||
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
|
||||
if (pcu_mbox & (1<<31)) { /* OC supported */
|
||||
dev_priv->max_delay = pcu_mbox & 0xff;
|
||||
dev_priv->rps.max_delay = pcu_mbox & 0xff;
|
||||
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
|
||||
}
|
||||
|
||||
@ -2492,10 +2533,10 @@ static void gen6_enable_rps(struct drm_device *dev)
|
||||
|
||||
/* requires MSI enabled */
|
||||
I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
|
||||
spin_lock_irq(&dev_priv->rps_lock);
|
||||
WARN_ON(dev_priv->pm_iir != 0);
|
||||
spin_lock_irq(&dev_priv->rps.lock);
|
||||
WARN_ON(dev_priv->rps.pm_iir != 0);
|
||||
I915_WRITE(GEN6_PMIMR, 0);
|
||||
spin_unlock_irq(&dev_priv->rps_lock);
|
||||
spin_unlock_irq(&dev_priv->rps.lock);
|
||||
/* enable all PM interrupts */
|
||||
I915_WRITE(GEN6_PMINTRMSK, 0);
|
||||
|
||||
@@ -2527,9 +2568,9 @@ static void gen6_update_ring_freq(struct drm_device *dev)
 	 * to use for memory access. We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
 	     gpu_freq--) {
-		int diff = dev_priv->max_delay - gpu_freq;
+		int diff = dev_priv->rps.max_delay - gpu_freq;

 		/*
 		 * For GPU frequencies less than 750MHz, just use the lowest
@@ -2700,6 +2741,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 	unsigned long now = jiffies_to_msecs(jiffies), diff1;
 	int i;

+	assert_spin_locked(&mchdev_lock);
+
 	diff1 = now - dev_priv->last_time1;

 	/* Prevent division-by-zero if we are asking too fast.
@@ -2901,15 +2944,14 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 	return v_table[pxvid].vd;
 }

-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
 	struct timespec now, diff1;
 	u64 diff;
 	unsigned long diffms;
 	u32 count;

-	if (dev_priv->info->gen != 5)
-		return;
+	assert_spin_locked(&mchdev_lock);

 	getrawmonotonic(&now);
 	diff1 = timespec_sub(now, dev_priv->last_time2);
@@ -2937,12 +2979,26 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 	dev_priv->gfx_power = diff;
 }

+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->info->gen != 5)
+		return;
+
+	spin_lock_irq(&mchdev_lock);
+
+	__i915_update_gfx_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
+}
+
 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 {
 	unsigned long t, corr, state1, corr2, state2;
 	u32 pxvid, ext_v;

-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	assert_spin_locked(&mchdev_lock);
+
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
 	pxvid = (pxvid >> 24) & 0x7f;
 	ext_v = pvid_to_extvid(dev_priv, pxvid);
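The split above is the usual locked/unlocked helper convention: the double-underscored variant only asserts that mchdev_lock is held, while the public entry point takes and drops the lock itself, so internal callers that already hold it avoid recursive locking. Schematically (placeholder names, not driver code):

	static void __example_update(struct drm_i915_private *dev_priv)
	{
		assert_spin_locked(&mchdev_lock); /* caller holds the lock */
		/* ... touch the shared ips state ... */
	}

	void example_update(struct drm_i915_private *dev_priv)
	{
		spin_lock_irq(&mchdev_lock);
		__example_update(dev_priv);
		spin_unlock_irq(&mchdev_lock);
	}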
@@ -2967,23 +3023,11 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 	state2 = (corr2 * state1) / 10000;
 	state2 /= 100; /* convert to mW */

-	i915_update_gfx_val(dev_priv);
+	__i915_update_gfx_val(dev_priv);

 	return dev_priv->gfx_power + state2;
 }

-/* Global for IPS driver to get at the current i915 device */
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- *   - i915_mch_dev
- *   - dev_priv->max_delay
- *   - dev_priv->min_delay
- *   - dev_priv->fmax
- *   - dev_priv->gpu_busy
- */
-static DEFINE_SPINLOCK(mchdev_lock);
-
 /**
  * i915_read_mch_val - return value for IPS use
  *
@@ -2995,7 +3039,7 @@ unsigned long i915_read_mch_val(void)
 	struct drm_i915_private *dev_priv;
 	unsigned long chipset_val, graphics_val, ret = 0;

-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
@@ -3006,7 +3050,7 @@ unsigned long i915_read_mch_val(void)
 	ret = chipset_val + graphics_val;

 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);

 	return ret;
 }
@@ -3022,7 +3066,7 @@ bool i915_gpu_raise(void)
 	struct drm_i915_private *dev_priv;
 	bool ret = true;

-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
@@ -3033,7 +3077,7 @@ bool i915_gpu_raise(void)
 		dev_priv->max_delay--;

 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);

 	return ret;
 }
@@ -3050,7 +3094,7 @@ bool i915_gpu_lower(void)
 	struct drm_i915_private *dev_priv;
 	bool ret = true;

-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
@@ -3061,7 +3105,7 @@ bool i915_gpu_lower(void)
 		dev_priv->max_delay++;

 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);

 	return ret;
 }
@@ -3075,17 +3119,20 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
 bool i915_gpu_busy(void)
 {
 	struct drm_i915_private *dev_priv;
+	struct intel_ring_buffer *ring;
 	bool ret = false;
+	int i;

-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;

-	ret = dev_priv->busy;
+	for_each_ring(ring, dev_priv, i)
+		ret |= !list_empty(&ring->request_list);

 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);

 	return ret;
 }
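All of the mchdev_lock sites switch from plain spin_lock() to spin_lock_irq() because the ilk ips state is now also updated from interrupt context; a process-context holder that leaves interrupts enabled could deadlock against it. The hazard, sketched as a comment:

	/*
	 * spin_lock(&mchdev_lock);     process context takes the lock
	 *   <interrupt fires on this cpu>
	 *   spin_lock(&mchdev_lock);   irq handler spins forever: deadlock
	 *
	 * spin_lock_irq() closes this window by disabling local interrupts
	 * for the duration of the critical section.
	 */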
@@ -3102,7 +3149,7 @@ bool i915_gpu_turbo_disable(void)
 	struct drm_i915_private *dev_priv;
 	bool ret = true;

-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
@@ -3115,7 +3162,7 @@ bool i915_gpu_turbo_disable(void)
 		ret = false;

 out_unlock:
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);

 	return ret;
 }
@@ -3143,19 +3190,20 @@ ips_ping_for_i915_load(void)

 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
 {
-	spin_lock(&mchdev_lock);
+	/* We only register the i915 ips part with intel-ips once everything is
+	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
+	spin_lock_irq(&mchdev_lock);
 	i915_mch_dev = dev_priv;
-	dev_priv->mchdev_lock = &mchdev_lock;
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);

 	ips_ping_for_i915_load();
 }

 void intel_gpu_ips_teardown(void)
 {
-	spin_lock(&mchdev_lock);
+	spin_lock_irq(&mchdev_lock);
 	i915_mch_dev = NULL;
-	spin_unlock(&mchdev_lock);
+	spin_unlock_irq(&mchdev_lock);
 }
 static void intel_init_emon(struct drm_device *dev)
 {
@@ -3735,42 +3783,6 @@ void intel_init_clock_gating(struct drm_device *dev)
 		dev_priv->display.init_pch_clock_gating(dev);
 }

-static void gen6_sanitize_pm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 limits, delay, old;
-
-	gen6_gt_force_wake_get(dev_priv);
-
-	old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
-	/* Make sure we continue to get interrupts
-	 * until we hit the minimum or maximum frequencies.
-	 */
-	limits &= ~(0x3f << 16 | 0x3f << 24);
-	delay = dev_priv->cur_delay;
-	if (delay < dev_priv->max_delay)
-		limits |= (dev_priv->max_delay & 0x3f) << 24;
-	if (delay > dev_priv->min_delay)
-		limits |= (dev_priv->min_delay & 0x3f) << 16;
-
-	if (old != limits) {
-		/* Note that the known failure case is to read back 0. */
-		DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
-				 "expected %08x, was %08x\n", limits, old);
-		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
-	}
-
-	gen6_gt_force_wake_put(dev_priv);
-}
-
-void intel_sanitize_pm(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->display.sanitize_pm)
-		dev_priv->display.sanitize_pm(dev);
-}
-
 /* Starting with Haswell, we have different power wells for
  * different parts of the GPU. This attempts to enable them all.
  */
@@ -3856,7 +3868,6 @@ void intel_init_pm(struct drm_device *dev)
 			dev_priv->display.update_wm = NULL;
 		}
 		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
-		dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 	} else if (IS_IVYBRIDGE(dev)) {
 		/* FIXME: detect B0+ stepping and use auto training */
 		if (SNB_READ_WM0_LATENCY()) {
@@ -3868,7 +3879,6 @@ void intel_init_pm(struct drm_device *dev)
 			dev_priv->display.update_wm = NULL;
 		}
 		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
-		dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 	} else if (IS_HASWELL(dev)) {
 		if (SNB_READ_WM0_LATENCY()) {
 			dev_priv->display.update_wm = sandybridge_update_wm;
@@ -3880,7 +3890,6 @@ void intel_init_pm(struct drm_device *dev)
 			dev_priv->display.update_wm = NULL;
 		}
 		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
-		dev_priv->display.sanitize_pm = gen6_sanitize_pm;
 	} else
 		dev_priv->display.update_wm = NULL;
 	} else if (IS_VALLEYVIEW(dev)) {
drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -218,11 +218,6 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 	u32 scratch_addr = pc->gtt_offset + 128;
 	int ret;

-	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	ret = intel_emit_post_sync_nonzero_flush(ring);
-	if (ret)
-		return ret;
-
 	/* Just flush everything.  Experiments have shown that reducing the
 	 * number of bits based on the write domains has little performance
 	 * impact.
@@ -262,6 +257,20 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 	return 0;
 }

+static int
+gen6_render_ring_flush__wa(struct intel_ring_buffer *ring,
+			   u32 invalidate_domains, u32 flush_domains)
+{
+	int ret;
+
+	/* Force SNB workarounds for PIPE_CONTROL flushes */
+	ret = intel_emit_post_sync_nonzero_flush(ring);
+	if (ret)
+		return ret;
+
+	return gen6_render_ring_flush(ring, invalidate_domains, flush_domains);
+}
+
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
@@ -462,7 +471,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

-	if (IS_IVYBRIDGE(dev))
+	if (HAS_L3_GPU_CACHE(dev))
 		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);

 	return ret;
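HAS_L3_GPU_CACHE replaces the open-coded IS_IVYBRIDGE() checks around the L3 parity interrupt. At this point in the series it is, as far as I can tell, just an alias in i915_drv.h, kept as a macro so later platforms with a driver-visible L3 cache only need to touch one place:

	#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev))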
@@ -628,26 +637,24 @@ pc_render_add_request(struct intel_ring_buffer *ring,
 }

 static u32
-gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
-	struct drm_device *dev = ring->dev;
-
 	/* Workaround to force correct ordering between irq and seqno writes on
 	 * ivb (and maybe also on snb) by reading from a CS register (like
 	 * ACTHD) before reading the status page. */
-	if (IS_GEN6(dev) || IS_GEN7(dev))
+	if (!lazy_coherency)
 		intel_ring_get_active_head(ring);
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }

 static u32
-ring_get_seqno(struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }

 static u32
-pc_render_get_seqno(struct intel_ring_buffer *ring)
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
 	struct pipe_control *pc = ring->private;
 	return pc->cpu_page[0];
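Callers now decide per call site whether a stale (but still monotonic) seqno is acceptable. A hedged sketch of the two resulting patterns, with seqno and ring assumed to be in scope:

	/* Opportunistic poll (busy ioctl, hangcheck): stale is fine and
	 * avoids the costly register read. */
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return true;

	/* Before actually sleeping in a wait we need a race-free value,
	 * so pay for the coherency kick once. */
	if (i915_seqno_passed(ring->get_seqno(ring, false), seqno))
		return true;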
@@ -852,7 +859,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
 						GEN6_RENDER_L3_PARITY_ERROR));
 		else
@@ -875,7 +882,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 		else
 			I915_WRITE_IMR(ring, ~0);
@@ -1010,7 +1017,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 	ring->size = 32 * PAGE_SIZE;

 	init_waitqueue_head(&ring->irq_queue);
@@ -1380,6 +1386,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen6_render_ring_flush;
+		if (INTEL_INFO(dev)->gen == 6)
+			ring->flush = gen6_render_ring_flush__wa;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;
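Note the init-time override pattern here: the generic flush function is installed first and gen6 alone swaps in the workaround wrapper, so the per-flush fast path stays branch-free on every other platform. The equivalent runtime check this avoids would look like:

	/* Paid on every flush instead of once at ring init: */
	if (INTEL_INFO(ring->dev)->gen == 6) {
		ret = intel_emit_post_sync_nonzero_flush(ring);
		if (ret)
			return ret;
	}
	return gen6_render_ring_flush(ring, invalidate_domains, flush_domains);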
@@ -1481,7 +1489,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);

 	ring->size = size;
 	ring->effective_size = ring->size;
@@ -1574,3 +1581,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)

 	return intel_init_ring_buffer(dev, ring);
 }
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
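These two helpers go hand in hand with the flushing_list removal: rings now just track a single gpu_caches_dirty flag. Roughly how the execbuffer/request paths pair them (a sketch with error handling elided, not the verbatim callers):

	/* Before running a batch: invalidate, and also write back any
	 * dirty data left over from an earlier batch. */
	ret = intel_ring_invalidate_all_caches(ring);

	/* After queuing commands that render: */
	ring->gpu_caches_dirty = true;

	/* When emitting the request/breadcrumb: */
	ret = intel_ring_flush_all_caches(ring);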
drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -72,7 +72,14 @@ struct intel_ring_buffer {
 					  u32	flush_domains);
 	int		(*add_request)(struct intel_ring_buffer *ring,
 				       u32 *seqno);
-	u32		(*get_seqno)(struct intel_ring_buffer *ring);
+	/* Some chipsets are not quite as coherent as advertised and need
+	 * an expensive kick to force a true read of the up-to-date seqno.
+	 * However, the up-to-date seqno is not always required and the last
+	 * seen value is good enough. Note that the seqno will always be
+	 * monotonic, even if not coherent.
+	 */
+	u32		(*get_seqno)(struct intel_ring_buffer *ring,
+				     bool lazy_coherency);
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
 					       u32 offset, u32 length);
 	void		(*cleanup)(struct intel_ring_buffer *ring);
@@ -100,15 +107,6 @@ struct intel_ring_buffer {
 	 */
 	struct list_head request_list;

-	/**
-	 * List of objects currently pending a GPU write flush.
-	 *
-	 * All elements on this list will belong to either the
-	 * active_list or flushing_list, last_rendering_seqno can
-	 * be used to differentiate between the two elements.
-	 */
-	struct list_head gpu_write_list;
-
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
@@ -204,6 +202,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 void intel_ring_advance(struct intel_ring_buffer *ring);

 u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
drivers/gpu/drm/i915/intel_sdvo.c
@@ -2081,8 +2081,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
 		intel_sdvo->is_hdmi = true;
 	}
-	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				       (1 << INTEL_ANALOG_CLONE_BIT));
+	intel_sdvo->base.cloneable = true;

 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (intel_sdvo->is_hdmi)
@@ -2113,7 +2112,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)

 	intel_sdvo->is_tv = true;
 	intel_sdvo->base.needs_tv_clock = true;
-	intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+	intel_sdvo->base.cloneable = false;

 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);

@@ -2156,8 +2155,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
 	}

-	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
-				       (1 << INTEL_ANALOG_CLONE_BIT));
+	intel_sdvo->base.cloneable = true;

 	intel_sdvo_connector_init(intel_sdvo_connector,
 				  intel_sdvo);
@@ -2189,8 +2187,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
 	}

-	intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
-				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+	/* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
+	 * as opposed to native LVDS, where we upscale with the panel-fitter
+	 * (and hence only the native LVDS resolution could be cloned). */
+	intel_sdvo->base.cloneable = true;

 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
drivers/gpu/drm/i915/intel_tv.c
@@ -1622,7 +1622,7 @@ intel_tv_init(struct drm_device *dev)
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
-	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
+	intel_encoder->cloneable = false;
 	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
 	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
 	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
include/drm/i915_drm.h
@@ -203,6 +203,9 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_WAIT	0x2c
 #define DRM_I915_GEM_CONTEXT_CREATE	0x2d
 #define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
+#define DRM_I915_GEM_SET_CACHEING	0x2f
+#define DRM_I915_GEM_GET_CACHEING	0x30
+#define DRM_I915_REG_READ		0x31

 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -227,6 +230,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHEING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHEING, struct drm_i915_gem_cacheing)
+#define DRM_IOCTL_I915_GEM_GET_CACHEING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHEING, struct drm_i915_gem_cacheing)
 #define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
 #define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
 #define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
@@ -249,6 +254,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)

 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -305,6 +311,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_LLC     	 17
 #define I915_PARAM_HAS_ALIASING_PPGTT	 18
 #define I915_PARAM_HAS_WAIT_TIMEOUT	 19
+#define I915_PARAM_HAS_SEMAPHORES	 20

 typedef struct drm_i915_getparam {
 	int param;
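Userspace can probe the new parameter with the usual GETPARAM dance. A minimal sketch, assuming an open DRM fd and libdrm's drmIoctl():

	#include <xf86drm.h>
	#include <drm/i915_drm.h>

	static int has_semaphores(int fd)
	{
		drm_i915_getparam_t gp;
		int value = 0;

		gp.param = I915_PARAM_HAS_SEMAPHORES;
		gp.value = &value;
		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0; /* old kernel: treat as unsupported */
		return value;
	}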
@@ -698,10 +705,31 @@ struct drm_i915_gem_busy {
 	/** Handle of the buffer to check for busy */
 	__u32 handle;

-	/** Return busy status (1 if busy, 0 if idle) */
+	/** Return busy status (1 if busy, 0 if idle).
+	 * The high word is used to indicate on which rings the object
+	 * currently resides:
+	 *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+	 */
 	__u32 busy;
 };

+#define I915_CACHEING_NONE		0
+#define I915_CACHEING_CACHED		1
+
+struct drm_i915_gem_cacheing {
+	/**
+	 * Handle of the buffer to set/get the cacheing level of. */
+	__u32 handle;
+
+	/**
+	 * Cacheing level to apply or return value
+	 *
+	 * bits0-15 are for generic cacheing control (i.e. the above defined
+	 * values). bits16-31 are reserved for platform-specific variations
+	 * (e.g. l3$ caching on gen7). */
+	__u32 cacheing;
+};
+
 #define I915_TILING_NONE	0
 #define I915_TILING_X		1
 #define I915_TILING_Y		2
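Userspace usage of the new pair is symmetric: SET_CACHEING writes the level, GET_CACHEING reads it back into the same struct. A minimal sketch, where fd and handle (a GEM handle from GEM_CREATE) are assumed to exist:

	struct drm_i915_gem_cacheing arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.cacheing = I915_CACHEING_CACHED;
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHEING, &arg))
		/* older kernels lack the ioctl: fall back to uncached */;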
@@ -918,4 +946,8 @@ struct drm_i915_gem_context_destroy {
 	__u32 pad;
 };

+struct drm_i915_reg_read {
+	__u64 offset;
+	__u64 val; /* Return value */
+};
 #endif /* _I915_DRM_H_ */
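The kernel side only honours a small whitelist of offsets for REG_READ; the render ring timestamp register is, to my reading of the original patch, the one allowed, and is used here as an assumption. A minimal userspace sketch:

	struct drm_i915_reg_read rr;

	memset(&rr, 0, sizeof(rr));
	rr.offset = 0x2358; /* RING_TIMESTAMP(RENDER_RING_BASE), assumed whitelisted */
	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
		printf("GPU timestamp: %llu\n", (unsigned long long)rr.val);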