Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Radeon and intel fixes mostly, one fix to the mgag200 driver to not
  hang on certain server variants."

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (32 commits)
  drm/radeon: fix typo in function header comment
  drm/radeon/kms: implement timestamp userspace query (v2)
  drm/radeon/kms: add MSAA texture support for r600-evergreen
  drm/radeon/kms: reorder code in r600_check_texture_resource
  drm/radeon: fence virtual address and free it once idle v4
  drm/radeon: fix some missing parens in asic macros
  drm/radeon: add some new SI pci ids
  drm/radeon: fix ordering in pll picking on dce4+
  drm/radeon: do not reenable crtc after moving vram start address
  drm/radeon: fix bank tiling parameters on cayman
  drm/radeon: fix bank tiling parameters on evergreen
  drm/radeon: fix bank tiling parameters on SI
  drm/radeon: properly handle crtc powergating
  drm/radeon: properly handle SS overrides on TN (v2)
  drm/radeon/dce4+: set a more reasonable cursor watermark
  drm/radeon: fix handling for ddc type 5 on combios
  drm/mgag200: fix G200ER pll picking algorithm
  drm/edid: Fix potential memory leak in edid_load()
  drm/udl: Use ERR_CAST inlined function instead of ERR_PTR(PTR_ERR(...))
  drm/radeon/kms: allow "invalid" DB formats as a means to disable DB
  ...
Linus Torvalds 2012-08-14 07:52:41 +03:00
commit 930a93a5ef
44 changed files with 516 additions and 239 deletions


@ -239,16 +239,45 @@
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */
#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
#endif


@ -1502,15 +1502,73 @@ static const struct intel_gtt_driver_description {
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV,
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
"Haswell", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
"Haswell", &sandybridge_gtt_driver },
{ 0, NULL, NULL }
};


@ -119,7 +119,7 @@ static int edid_load(struct drm_connector *connector, char *name,
{
const struct firmware *fw;
struct platform_device *pdev;
u8 *fwdata = NULL, *edid;
u8 *fwdata = NULL, *edid, *new_edid;
int fwsize, expected;
int builtin = 0, err = 0;
int i, valid_extensions = 0;
@ -195,12 +195,14 @@ static int edid_load(struct drm_connector *connector, char *name,
"\"%s\" for connector \"%s\"\n", valid_extensions,
edid[0x7e], name, connector_name);
edid[0x7e] = valid_extensions;
edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
GFP_KERNEL);
if (edid == NULL) {
if (new_edid == NULL) {
err = -ENOMEM;
kfree(edid);
goto relfw_out;
}
edid = new_edid;
}
connector->display_info.raw_edid = edid;

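The edid_load() fix above follows the standard krealloc() idiom: keep the old pointer until the reallocation is known to have succeeded, because a failed krealloc() leaves the original block allocated, and overwriting the only owning pointer with NULL would leak it. A minimal sketch of the same idiom with a hypothetical helper (EDID_LENGTH is the usual 128-byte block size from drm_edid.h):

static u8 *shrink_edid_or_free(u8 *edid, int valid_extensions)
{
	u8 *new_edid;

	/* On failure krealloc() returns NULL and leaves 'edid' allocated. */
	new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
			    GFP_KERNEL);
	if (!new_edid) {
		kfree(edid);		/* the old block must be freed explicitly */
		return NULL;
	}
	/* On success the old pointer may already be stale; use the new one. */
	return new_edid;
}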

@ -346,11 +346,40 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),


@ -451,7 +451,6 @@ int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_i915_file_private *file_priv = NULL;
struct i915_hw_context *to;
struct drm_i915_gem_object *from_obj = ring->last_context_obj;
int ret;
if (dev_priv->hw_contexts_disabled)
return 0;


@ -291,6 +291,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj = to_intel_bo(target_obj);
target_offset = target_i915_obj->gtt_offset;
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
!target_i915_obj->has_global_gtt_mapping)) {
i915_gem_gtt_bind_object(target_i915_obj,
target_i915_obj->cache_level);
}
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
@ -399,16 +409,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
io_mapping_unmap_atomic(reloc_page);
}
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
!target_i915_obj->has_global_gtt_mapping)) {
i915_gem_gtt_bind_object(target_i915_obj,
target_i915_obj->cache_level);
}
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;


@ -361,7 +361,8 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->mm.gtt->needs_dmar)
/* don't map imported dma buf objects */
if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
return intel_gtt_map_memory(obj->pages,
obj->base.size >> PAGE_SHIFT,
&obj->sg_list,


@ -32,6 +32,7 @@
#include "intel_drv.h"
#include "i915_drv.h"
#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -224,3 +225,14 @@ void i915_teardown_sysfs(struct drm_device *dev)
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
}
#else
void i915_setup_sysfs(struct drm_device *dev)
{
return;
}
void i915_teardown_sysfs(struct drm_device *dev)
{
return;
}
#endif /* CONFIG_PM */


@ -869,6 +869,7 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
unsigned long bestppm, ppm, absppm;
int dotclk, flag;
flag = 0;
dotclk = target * 1000;
bestppm = 1000000;
ppm = absppm = 0;


@ -46,15 +46,16 @@
})
#define wait_for_atomic_us(COND, US) ({ \
int i, ret__ = -ETIMEDOUT; \
for (i = 0; i < (US); i++) { \
if ((COND)) { \
ret__ = 0; \
break; \
} \
udelay(1); \
} \
ret__; \
unsigned long timeout__ = jiffies + usecs_to_jiffies(US); \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(jiffies, timeout__)) { \
ret__ = -ETIMEDOUT; \
break; \
} \
cpu_relax(); \
} \
ret__; \
})
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
@ -380,7 +381,6 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern u32 intel_panel_get_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern int intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev,


@ -540,9 +540,6 @@ void intel_teardown_gmbus(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
if (dev_priv->gmbus == NULL)
return;
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);


@ -213,7 +213,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
return val;
}
u32 intel_panel_get_backlight(struct drm_device *dev)
static u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;


@ -3963,6 +3963,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
DRM_ERROR("Force wake wait timed out\n");
I915_WRITE_NOTRACE(FORCEWAKE, 1);
POSTING_READ(FORCEWAKE);
if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
DRM_ERROR("Force wake wait timed out\n");
@ -3983,6 +3984,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
DRM_ERROR("Force wake wait timed out\n");
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
POSTING_READ(FORCEWAKE_MT);
if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
DRM_ERROR("Force wake wait timed out\n");
@ -4018,14 +4020,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
/* The below doubles as a POSTING_READ */
POSTING_READ(FORCEWAKE);
gen6_gt_check_fifodbg(dev_priv);
}
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
/* The below doubles as a POSTING_READ */
POSTING_READ(FORCEWAKE_MT);
gen6_gt_check_fifodbg(dev_priv);
}


@ -289,8 +289,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);
/* Initialize the ring. */
I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
@ -316,6 +314,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
}
}
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
I915_WRITE_START(ring, obj->gtt_offset);
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);


@ -444,13 +444,16 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
struct i2c_msg *msgs;
int i, ret = true;
/* Would be simpler to allocate both in one go ? */
buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
if (!buf)
return false;
msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
if (!msgs)
if (!msgs) {
kfree(buf);
return false;
}
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);


@ -468,10 +468,11 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
{
unsigned int vcomax, vcomin, pllreffreq;
unsigned int delta, tmpdelta;
unsigned int testr, testn, testm, testo;
int testr, testn, testm, testo;
unsigned int p, m, n;
unsigned int computed;
unsigned int computed, vco;
int tmp;
const unsigned int m_div_val[] = { 1, 2, 4, 8 };
m = n = p = 0;
vcomax = 1488000;
@ -490,12 +491,13 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
if (delta == 0)
break;
for (testo = 5; testo < 33; testo++) {
computed = pllreffreq * (testn + 1) /
vco = pllreffreq * (testn + 1) /
(testr + 1);
if (computed < vcomin)
if (vco < vcomin)
continue;
if (computed > vcomax)
if (vco > vcomax)
continue;
computed = vco / (m_div_val[testm] * (testo + 1));
if (computed > clock)
tmpdelta = computed - clock;
else

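To see what the new 'computed = vco / ...' line changes in the G200ER PLL search: the old loop compared the raw VCO frequency against the requested pixel clock, so the delta search was off by the whole output-divider factor. A rough worked example using only values visible in this hunk, with a requested clock of 75000 kHz and one candidate divider setting:

	vco      = 1200000                         /* inside the 1056000..1488000 VCO window */
	computed = vco / (m_div_val[1] * (7 + 1))  /* testm = 1, testo = 7 */
	         = 1200000 / (2 * 8)
	         = 75000                           /* exact match against 'clock' */

The old code would have compared 1200000 itself against the 75000 kHz target, so the delta never converged; validating the VCO window against vco and the target against computed is what lets the search pick usable dividers.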

@ -259,7 +259,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
/* disable crtc pair power gating before programming */
if (ASIC_IS_DCE6(rdev))
if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@ -279,7 +279,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
/* power gating is per-pair */
if (ASIC_IS_DCE6(rdev)) {
if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) {
struct drm_crtc *other_crtc;
struct radeon_crtc *other_radeon_crtc;
list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) {
@ -1531,12 +1531,12 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
* crtc virtual pixel clock.
*/
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
if (ASIC_IS_DCE5(rdev))
return ATOM_DCPLL;
if (rdev->clock.dp_extclk)
return ATOM_PPLL_INVALID;
else if (ASIC_IS_DCE6(rdev))
return ATOM_PPLL0;
else if (rdev->clock.dp_extclk)
return ATOM_PPLL_INVALID;
else if (ASIC_IS_DCE5(rdev))
return ATOM_DCPLL;
}
}
}
@ -1635,18 +1635,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
radeon_crtc->in_mode_set = true;
/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
/* disable crtc pair power gating before programming */
if (ASIC_IS_DCE6(rdev))
atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
atombios_lock_crtc(crtc, ATOM_DISABLE);
radeon_crtc->in_mode_set = false;
}
static void atombios_crtc_disable(struct drm_crtc *crtc)


@ -1229,24 +1229,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
save->vga_control[0] = RREG32(D1VGA_CONTROL);
save->vga_control[1] = RREG32(D2VGA_CONTROL);
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
if (rdev->num_crtc >= 4) {
save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
}
if (rdev->num_crtc >= 6) {
save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
/* Stop all video */
WREG32(VGA_RENDER_CONTROL, 0);
@ -1357,47 +1341,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
/* Unlock host access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
/* Restore video state */
WREG32(D1VGA_CONTROL, save->vga_control[0]);
WREG32(D2VGA_CONTROL, save->vga_control[1]);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
@ -1986,10 +1929,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP)
rdev->config.evergreen.tile_config |= 1 << 4;
else {
if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
rdev->config.evergreen.tile_config |= 1 << 4;
else
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.evergreen.tile_config |= 0 << 4;
break;
case 1: /* eight banks */
rdev->config.evergreen.tile_config |= 1 << 4;
break;
case 2: /* sixteen banks */
default:
rdev->config.evergreen.tile_config |= 2 << 4;
break;
}
}
rdev->config.evergreen.tile_config |= 0 << 8;
rdev->config.evergreen.tile_config |=


@ -788,6 +788,13 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
case V_030000_SQ_TEX_DIM_1D_ARRAY:
case V_030000_SQ_TEX_DIM_2D_ARRAY:
depth = 1;
break;
case V_030000_SQ_TEX_DIM_2D_MSAA:
case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
surf.nsamples = 1 << llevel;
llevel = 0;
depth = 1;
break;
case V_030000_SQ_TEX_DIM_3D:
break;
default:
@ -961,13 +968,15 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
if (track->db_dirty) {
/* Check stencil buffer */
if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
G_028800_STENCIL_ENABLE(track->db_depth_control)) {
r = evergreen_cs_track_validate_stencil(p);
if (r)
return r;
}
/* Check depth buffer */
if (G_028800_Z_ENABLE(track->db_depth_control)) {
if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
G_028800_Z_ENABLE(track->db_depth_control)) {
r = evergreen_cs_track_validate_depth(p);
if (r)
return r;


@ -1277,6 +1277,8 @@
#define S_028044_FORMAT(x) (((x) & 0x1) << 0)
#define G_028044_FORMAT(x) (((x) >> 0) & 0x1)
#define C_028044_FORMAT 0xFFFFFFFE
#define V_028044_STENCIL_INVALID 0
#define V_028044_STENCIL_8 1
#define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7)
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c


@ -574,10 +574,18 @@ static void cayman_gpu_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP)
rdev->config.cayman.tile_config |= 1 << 4;
else {
if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
rdev->config.cayman.tile_config |= 1 << 4;
else
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.cayman.tile_config |= 0 << 4;
break;
case 1: /* eight banks */
rdev->config.cayman.tile_config |= 1 << 4;
break;
case 2: /* sixteen banks */
default:
rdev->config.cayman.tile_config |= 2 << 4;
break;
}
}
rdev->config.cayman.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;


@ -3789,3 +3789,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
/**
* r600_get_gpu_clock - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (R6xx-cayman).
* Returns the 64 bit clock counter snapshot.
*/
uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
{
uint64_t clock;
mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}


@ -764,8 +764,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
/* Check depth buffer */
if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
G_028800_Z_ENABLE(track->db_depth_control))) {
if (track->db_dirty &&
G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
(G_028800_STENCIL_ENABLE(track->db_depth_control) ||
G_028800_Z_ENABLE(track->db_depth_control))) {
r = r600_cs_track_validate_db(p);
if (r)
return r;
@ -1557,13 +1559,14 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
u32 tiling_flags)
{
struct r600_cs_track *track = p->track;
u32 nfaces, llevel, blevel, w0, h0, d0;
u32 word0, word1, l0_size, mipmap_size, word2, word3;
u32 dim, nfaces, llevel, blevel, w0, h0, d0;
u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
u32 height_align, pitch, pitch_align, depth_align;
u32 array, barray, larray;
u32 barray, larray;
u64 base_align;
struct array_mode_checker array_check;
u32 format;
bool is_array;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
@ -1581,12 +1584,28 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
}
word1 = radeon_get_ib_value(p, idx + 1);
word2 = radeon_get_ib_value(p, idx + 2) << 8;
word3 = radeon_get_ib_value(p, idx + 3) << 8;
word4 = radeon_get_ib_value(p, idx + 4);
word5 = radeon_get_ib_value(p, idx + 5);
dim = G_038000_DIM(word0);
w0 = G_038000_TEX_WIDTH(word0) + 1;
pitch = (G_038000_PITCH(word0) + 1) * 8;
h0 = G_038004_TEX_HEIGHT(word1) + 1;
d0 = G_038004_TEX_DEPTH(word1);
format = G_038004_DATA_FORMAT(word1);
blevel = G_038010_BASE_LEVEL(word4);
llevel = G_038014_LAST_LEVEL(word5);
/* pitch in texels */
array_check.array_mode = G_038000_TILE_MODE(word0);
array_check.group_size = track->group_size;
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = 1;
array_check.blocksize = r600_fmt_get_blocksize(format);
nfaces = 1;
array = 0;
switch (G_038000_DIM(word0)) {
is_array = false;
switch (dim) {
case V_038000_SQ_TEX_DIM_1D:
case V_038000_SQ_TEX_DIM_2D:
case V_038000_SQ_TEX_DIM_3D:
@ -1599,29 +1618,25 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
break;
case V_038000_SQ_TEX_DIM_1D_ARRAY:
case V_038000_SQ_TEX_DIM_2D_ARRAY:
array = 1;
is_array = true;
break;
case V_038000_SQ_TEX_DIM_2D_MSAA:
case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
is_array = true;
/* fall through */
case V_038000_SQ_TEX_DIM_2D_MSAA:
array_check.nsamples = 1 << llevel;
llevel = 0;
break;
default:
dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
return -EINVAL;
}
format = G_038004_DATA_FORMAT(word1);
if (!r600_fmt_is_valid_texture(format, p->family)) {
dev_warn(p->dev, "%s:%d texture invalid format %d\n",
__func__, __LINE__, format);
return -EINVAL;
}
/* pitch in texels */
pitch = (G_038000_PITCH(word0) + 1) * 8;
array_check.array_mode = G_038000_TILE_MODE(word0);
array_check.group_size = track->group_size;
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = 1;
array_check.blocksize = r600_fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@ -1647,20 +1662,13 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
return -EINVAL;
}
word2 = radeon_get_ib_value(p, idx + 2) << 8;
word3 = radeon_get_ib_value(p, idx + 3) << 8;
word0 = radeon_get_ib_value(p, idx + 4);
word1 = radeon_get_ib_value(p, idx + 5);
blevel = G_038010_BASE_LEVEL(word0);
llevel = G_038014_LAST_LEVEL(word1);
if (blevel > llevel) {
dev_warn(p->dev, "texture blevel %d > llevel %d\n",
blevel, llevel);
}
if (array == 1) {
barray = G_038014_BASE_ARRAY(word1);
larray = G_038014_LAST_ARRAY(word1);
if (is_array) {
barray = G_038014_BASE_ARRAY(word5);
larray = G_038014_LAST_ARRAY(word5);
nfaces = larray - barray + 1;
}
@ -1677,7 +1685,6 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
word3 = radeon_get_ib_value(p, idx + 3) << 8;
if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/


@ -602,6 +602,9 @@
#define RLC_HB_WPTR 0x3f1c
#define RLC_HB_WPTR_LSB_ADDR 0x3f14
#define RLC_HB_WPTR_MSB_ADDR 0x3f18
#define RLC_GPU_CLOCK_COUNT_LSB 0x3f38
#define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c
#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40
#define RLC_MC_CNTL 0x3f44
#define RLC_UCODE_CNTL 0x3f48
#define RLC_UCODE_ADDR 0x3f2c


@ -300,6 +300,7 @@ struct radeon_bo_va {
uint64_t soffset;
uint64_t eoffset;
uint32_t flags;
struct radeon_fence *fence;
bool valid;
};
@ -1533,6 +1534,7 @@ struct radeon_device {
unsigned debugfs_count;
/* virtual memory */
struct radeon_vm_manager vm_manager;
struct mutex gpu_clock_mutex;
};
int radeon_device_init(struct radeon_device *rdev,
@ -1733,11 +1735,11 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))
#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
/* Common functions */
/* AGP */

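The parentheses added around rdev in the pflip/display/mc_wait_for_idle macros are the usual macro-hygiene rule: a bare argument breaks as soon as a caller passes an expression instead of a plain identifier. A contrived stand-alone illustration, not taken from the patch:

#include <stdio.h>

struct dev { int id; };

#define DEV_ID_BAD(d)	d->id		/* like the old macro bodies */
#define DEV_ID_GOOD(d)	(d)->id		/* like the fixed macro bodies */

int main(void)
{
	struct dev devs[2] = { { 1 }, { 2 } };
	struct dev *p = devs;

	/* DEV_ID_BAD(p + 1) would expand to  p + 1->id  and fail to compile;
	 * DEV_ID_GOOD(p + 1) expands to  (p + 1)->id  and reads devs[1].id. */
	printf("%d\n", DEV_ID_GOOD(p + 1));
	return 0;
}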

@ -255,13 +255,10 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
* rv515
*/
struct rv515_mc_save {
u32 d1vga_control;
u32 d2vga_control;
u32 vga_render_control;
u32 vga_hdp_control;
u32 d1crtc_control;
u32 d2crtc_control;
};
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@ -371,6 +368,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@ -389,11 +387,10 @@ void r700_cp_fini(struct radeon_device *rdev);
* evergreen
*/
struct evergreen_mc_save {
u32 vga_control[6];
u32 vga_render_control;
u32 vga_hdp_control;
u32 crtc_control[6];
};
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
@ -472,5 +469,6 @@ int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
uint64_t si_get_gpu_clock(struct radeon_device *rdev);
#endif


@ -1263,6 +1263,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
};
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
@ -1390,27 +1392,50 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
u16 data_offset, size;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
union igp_info *igp_info;
u8 frev, crev;
u16 percentage = 0, rate = 0;
/* get any igp specific overrides */
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
igp_info = (union igp_info *)
(mode_info->atom_context->bios + data_offset);
switch (id) {
case ASIC_INTERNAL_SS_ON_TMDS:
percentage = le16_to_cpu(igp_info->usDVISSPercentage);
rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
switch (crev) {
case 6:
switch (id) {
case ASIC_INTERNAL_SS_ON_TMDS:
percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_HDMI:
percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_LVDS:
percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
break;
}
break;
case ASIC_INTERNAL_SS_ON_HDMI:
percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
case 7:
switch (id) {
case ASIC_INTERNAL_SS_ON_TMDS:
percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_HDMI:
percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_LVDS:
percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
break;
}
break;
case ASIC_INTERNAL_SS_ON_LVDS:
percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
default:
DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
break;
}
if (percentage)


@ -719,6 +719,34 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
return i2c;
}
static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct radeon_i2c_bus_rec i2c;
u16 offset;
u8 id, blocks, clk, data;
int i;
i2c.valid = false;
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
if (offset) {
blocks = RBIOS8(offset + 2);
for (i = 0; i < blocks; i++) {
id = RBIOS8(offset + 3 + (i * 5) + 0);
if (id == 136) {
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
/* gpiopad */
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
(1 << clk), (1 << data));
break;
}
}
}
return i2c;
}
void radeon_combios_i2c_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
@ -755,30 +783,14 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
} else if (rdev->family == CHIP_RS300 ||
rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) {
u16 offset;
u8 id, blocks, clk, data;
int i;
/* 0x68 */
i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
if (offset) {
blocks = RBIOS8(offset + 2);
for (i = 0; i < blocks; i++) {
id = RBIOS8(offset + 3 + (i * 5) + 0);
if (id == 136) {
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
/* gpiopad */
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
(1 << clk), (1 << data));
rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
break;
}
}
}
/* gpiopad */
i2c = radeon_combios_get_i2c_info_from_table(rdev);
if (i2c.valid)
rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
} else if ((rdev->family == CHIP_R200) ||
(rdev->family >= CHIP_R300)) {
/* 0x68 */
@ -2321,7 +2333,10 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
connector = (tmp >> 12) & 0xf;
ddc_type = (tmp >> 8) & 0xf;
ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
if (ddc_type == 5)
ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
else
ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
switch (connector) {
case CONNECTOR_PROPRIETARY_LEGACY:


@ -278,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return 0;
}
static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
struct radeon_fence *fence)
{
struct radeon_fpriv *fpriv = parser->filp->driver_priv;
struct radeon_vm *vm = &fpriv->vm;
struct radeon_bo_list *lobj;
if (parser->chunk_ib_idx == -1) {
return;
}
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
return;
}
list_for_each_entry(lobj, &parser->validated, tv.head) {
struct radeon_bo_va *bo_va;
struct radeon_bo *rbo = lobj->bo;
bo_va = radeon_bo_va(rbo, vm);
radeon_fence_unref(&bo_va->fence);
bo_va->fence = radeon_fence_ref(fence);
}
}
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
@ -290,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
if (!error)
if (!error) {
/* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */
radeon_bo_vm_fence_va(parser, parser->ib.fence);
ttm_eu_fence_buffer_objects(&parser->validated,
parser->ib.fence);
else
} else {
ttm_eu_backoff_reservation(&parser->validated);
}
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
@ -388,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if (parser->chunk_ib_idx == -1)
return 0;
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;


@ -67,7 +67,8 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
if (ASIC_IS_DCE4(rdev)) {
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
@ -94,7 +95,8 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
if (ASIC_IS_DCE4(rdev)) {
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |

View File

@ -1009,6 +1009,7 @@ int radeon_device_init(struct radeon_device *rdev,
atomic_set(&rdev->ih.lock, 0);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);


@ -59,9 +59,12 @@
* 2.15.0 - add max_pipes query
* 2.16.0 - fix evergreen 2D tiled surface calculation
* 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
* 2.18.0 - r600-eg: allow "invalid" DB formats
* 2.19.0 - r600-eg: MSAA textures
* 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 17
#define KMS_DRIVER_MINOR 20
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);


@ -814,7 +814,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
return -EINVAL;
}
if (bo_va->valid)
if (bo_va->valid && mem)
return 0;
ngpu_pages = radeon_bo_ngpu_pages(bo);
@ -859,11 +859,27 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
int r;
bo_va = radeon_bo_va(bo, vm);
if (bo_va == NULL)
return 0;
/* wait for va use to end */
while (bo_va->fence) {
r = radeon_fence_wait(bo_va->fence, false);
if (r) {
DRM_ERROR("error while waiting for fence: %d\n", r);
}
if (r == -EDEADLK) {
r = radeon_gpu_reset(rdev);
if (!r)
continue;
}
break;
}
radeon_fence_unref(&bo_va->fence);
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
@ -934,7 +950,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
}
/**
* radeon_vm_init - tear down a vm instance
* radeon_vm_fini - tear down a vm instance
*
* @rdev: radeon_device pointer
* @vm: requested vm
@ -952,12 +968,15 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_vm_unbind_locked(rdev, vm);
mutex_unlock(&rdev->vm_manager.lock);
/* remove all bo */
/* remove all bo; at this point none are busy any more because unbind
* waited for the last vm fence to signal
*/
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
list_del_init(&bo_va->bo_list);
list_del_init(&bo_va->vm_list);
radeon_fence_unref(&bo_va->fence);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
kfree(bo_va);
}
@ -969,6 +988,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
radeon_fence_unref(&bo_va->fence);
radeon_bo_unreserve(bo_va->bo);
kfree(bo_va);
}


@ -134,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
struct radeon_device *rdev = rbo->rdev;
struct radeon_fpriv *fpriv = file_priv->driver_priv;
struct radeon_vm *vm = &fpriv->vm;
struct radeon_bo_va *bo_va, *tmp;
if (rdev->family < CHIP_CAYMAN) {
return;
}
if (radeon_bo_reserve(rbo, false)) {
dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n");
return;
}
list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
if (bo_va->vm == vm) {
/* remove from this vm address space */
mutex_lock(&vm->mutex);
list_del(&bo_va->vm_list);
mutex_unlock(&vm->mutex);
list_del(&bo_va->bo_list);
kfree(bo_va);
}
}
radeon_vm_bo_rmv(rdev, vm, rbo);
radeon_bo_unreserve(rbo);
}


@ -29,6 +29,7 @@
#include "drm_sarea.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "radeon_asic.h"
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
@ -167,17 +168,39 @@ static void radeon_set_filp_rights(struct drm_device *dev,
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_info *info;
struct drm_radeon_info *info = data;
struct radeon_mode_info *minfo = &rdev->mode_info;
uint32_t *value_ptr;
uint32_t value;
uint32_t value, *value_ptr;
uint64_t value64, *value_ptr64;
struct drm_crtc *crtc;
int i, found;
info = data;
/* TIMESTAMP is a 64-bit value, needs special handling. */
if (info->request == RADEON_INFO_TIMESTAMP) {
if (rdev->family >= CHIP_R600) {
value_ptr64 = (uint64_t*)((unsigned long)info->value);
if (rdev->family >= CHIP_TAHITI) {
value64 = si_get_gpu_clock(rdev);
} else {
value64 = r600_get_gpu_clock(rdev);
}
if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
return 0;
} else {
DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
return -EINVAL;
}
}
value_ptr = (uint32_t *)((unsigned long)info->value);
if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
@ -337,7 +360,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
DRM_ERROR("copy_to_user\n");
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
return 0;


@ -1025,9 +1025,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
radeon_crtc->in_mode_set = true;
/*
* The hardware wedges sometimes if you reconfigure one CRTC
* whilst another is running (see fdo bug #24611).
@ -1038,6 +1040,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc)
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
@ -1048,6 +1051,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
if (crtci->enabled)
radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
}
radeon_crtc->in_mode_set = false;
}
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {


@ -275,6 +275,7 @@ struct radeon_crtc {
u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
bool in_mode_set;
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;


@ -52,11 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo)
list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
/* remove from all vm address space */
mutex_lock(&bo_va->vm->mutex);
list_del(&bo_va->vm_list);
mutex_unlock(&bo_va->vm->mutex);
list_del(&bo_va->bo_list);
kfree(bo_va);
radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
}
}


@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
/* Stop all video */
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
/* Unlock host access */
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
/* Restore video state */
WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
}


@ -1639,11 +1639,19 @@ static void si_gpu_init(struct radeon_device *rdev)
/* XXX what about 12? */
rdev->config.si.tile_config |= (3 << 0);
break;
}
if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
rdev->config.si.tile_config |= 1 << 4;
else
}
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.si.tile_config |= 0 << 4;
break;
case 1: /* eight banks */
rdev->config.si.tile_config |= 1 << 4;
break;
case 2: /* sixteen banks */
default:
rdev->config.si.tile_config |= 2 << 4;
break;
}
rdev->config.si.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.si.tile_config |=
@ -3960,3 +3968,22 @@ void si_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
/**
* si_get_gpu_clock - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (SI).
* Returns the 64 bit clock counter snapshot.
*/
uint64_t si_get_gpu_clock(struct radeon_device *rdev)
{
uint64_t clock;
mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}


@ -698,6 +698,9 @@
#define RLC_UCODE_ADDR 0xC32C
#define RLC_UCODE_DATA 0xC330
#define RLC_GPU_CLOCK_COUNT_LSB 0xC338
#define RLC_GPU_CLOCK_COUNT_MSB 0xC33C
#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340
#define RLC_MC_CNTL 0xC344
#define RLC_UCODE_CNTL 0xC348


@ -308,7 +308,7 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
/* need to attach */
attach = dma_buf_attach(dma_buf, dev->dev);
if (IS_ERR(attach))
return ERR_PTR(PTR_ERR(attach));
return ERR_CAST(attach);
sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sg)) {


@ -213,9 +213,12 @@
{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \


@ -964,6 +964,8 @@ struct drm_radeon_cs {
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
/* max pipes - needed for compute shaders */
#define RADEON_INFO_MAX_PIPES 0x10
/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
#define RADEON_INFO_TIMESTAMP 0x11
struct drm_radeon_info {
uint32_t request;
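From the userspace side, the new RADEON_INFO_TIMESTAMP request goes through the existing DRM_RADEON_INFO ioctl, with the kernel writing the 64-bit counter back through the pointer stored in drm_radeon_info.value (see the radeon_info_ioctl() hunk above). A minimal sketch, assuming a libdrm file descriptor for an r6xx-or-newer GPU; the helper name and include paths are illustrative:

#include <stdint.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Read the GPU clock counter snapshot; returns 0 on success or a negative
 * errno (e.g. -EINVAL from kernels or GPUs without RADEON_INFO_TIMESTAMP). */
static int radeon_read_timestamp(int fd, uint64_t *timestamp)
{
	struct drm_radeon_info info = { 0 };

	info.request = RADEON_INFO_TIMESTAMP;	/* 0x11, needs KMS driver 2.20.0+ */
	info.value = (uintptr_t)timestamp;	/* kernel copies 8 bytes to this address */

	return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}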