Merge branch 'backlight-rework' into drm-intel-next-queued
Pull in Jani's backlight rework branch. This was merged through a separate branch to be able to sort out the Broadwell conflicts properly before pulling it into the main development branch.

Conflicts:
	drivers/gpu/drm/i915/intel_display.c

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit c09cd6e969
@@ -2834,7 +2834,9 @@ L:	dri-devel@lists.freedesktop.org
L:	linux-tegra@vger.kernel.org
T:	git git://anongit.freedesktop.org/tegra/linux.git
S:	Supported
F:	drivers/gpu/drm/tegra/
F:	drivers/gpu/host1x/
F:	include/linux/host1x.h
F:	include/uapi/drm/tegra_drm.h
F:	Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -313,6 +313,16 @@ static size_t __init gen6_stolen_size(int num, int slot, int func)
	return gmch_ctrl << 25; /* 32 MB units */
}

static inline size_t gen8_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
	gmch_ctrl &= BDW_GMCH_GMS_MASK;
	return gmch_ctrl << 25; /* 32 MB units */
}

typedef size_t (*stolen_size_fn)(int num, int slot, int func);

static struct pci_device_id intel_stolen_ids[] __initdata = {
@@ -336,6 +346,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
	INTEL_IVB_D_IDS(gen6_stolen_size),
	INTEL_HSW_D_IDS(gen6_stolen_size),
	INTEL_HSW_M_IDS(gen6_stolen_size),
	INTEL_BDW_M_IDS(gen8_stolen_size),
	INTEL_BDW_D_IDS(gen8_stolen_size)
};

static void __init intel_graphics_stolen(int num, int slot, int func)
@@ -176,6 +176,8 @@ source "drivers/gpu/drm/mgag200/Kconfig"

source "drivers/gpu/drm/cirrus/Kconfig"

source "drivers/gpu/drm/armada/Kconfig"

source "drivers/gpu/drm/rcar-du/Kconfig"

source "drivers/gpu/drm/shmobile/Kconfig"
@@ -187,3 +189,5 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
source "drivers/gpu/drm/qxl/Kconfig"

source "drivers/gpu/drm/msm/Kconfig"

source "drivers/gpu/drm/tegra/Kconfig"
@@ -50,10 +50,12 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_ARMADA) += armada/
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
drivers/gpu/drm/armada/Kconfig (new file, 24 lines)
@@ -0,0 +1,24 @@
config DRM_ARMADA
	tristate "DRM support for Marvell Armada SoCs"
	depends on DRM && HAVE_CLK && ARM
	select FB_CFB_FILLRECT
	select FB_CFB_COPYAREA
	select FB_CFB_IMAGEBLIT
	select DRM_KMS_HELPER
	help
	  Support the "LCD" controllers found on the Marvell Armada 510
	  devices. There are two controllers on the device, each controller
	  supports graphics and video overlays.

	  This driver provides no built-in acceleration; acceleration is
	  performed by other IP found on the SoC. This driver provides
	  kernel mode setting and buffer management to userspace.

config DRM_ARMADA_TDA1998X
	bool "Support TDA1998X HDMI output"
	depends on DRM_ARMADA != n
	depends on I2C && DRM_I2C_NXP_TDA998X = y
	default y
	help
	  Support the TDA1998x HDMI output device found on the Solid-Run
	  CuBox.
drivers/gpu/drm/armada/Makefile (new file, 7 lines)
@@ -0,0 +1,7 @@
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
	armada_gem.o armada_output.o armada_overlay.o \
	armada_slave.o
armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o

obj-$(CONFIG_DRM_ARMADA) := armada.o
drivers/gpu/drm/armada/armada_510.c (new file, 87 lines)
@@ -0,0 +1,87 @@
/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Armada 510 (aka Dove) variant support
 */
#include <linux/clk.h>
#include <linux/io.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_hw.h"

static int armada510_init(struct armada_private *priv, struct device *dev)
{
	priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");

	if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
		priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);

	return PTR_RET(priv->extclk[0]);
}

static int armada510_crtc_init(struct armada_crtc *dcrtc)
{
	/* Lower the watermark so to eliminate jitter at higher bandwidths */
	armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
	return 0;
}

/*
 * Armada510 specific SCLK register selection.
 * This gets called with sclk = NULL to test whether the mode is
 * supportable, and again with sclk != NULL to set the clocks up for
 * that. The former can return an error, but the latter is expected
 * not to.
 *
 * We currently are pretty rudimentary here, always selecting
 * EXT_REF_CLK_1 for LCD0 and erroring LCD1. This needs improvement!
 */
static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
	const struct drm_display_mode *mode, uint32_t *sclk)
{
	struct armada_private *priv = dcrtc->crtc.dev->dev_private;
	struct clk *clk = priv->extclk[0];
	int ret;

	if (dcrtc->num == 1)
		return -EINVAL;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (dcrtc->clk != clk) {
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;
		dcrtc->clk = clk;
	}

	if (sclk) {
		uint32_t rate, ref, div;

		rate = mode->clock * 1000;
		ref = clk_round_rate(clk, rate);
		div = DIV_ROUND_UP(ref, rate);
		if (div < 1)
			div = 1;

		clk_set_rate(clk, ref);
		*sclk = div | SCLK_510_EXTCLK1;
	}

	return 0;
}

const struct armada_variant armada510_ops = {
	.has_spu_adv_reg = true,
	.spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
	.init = armada510_init,
	.crtc_init = armada510_crtc_init,
	.crtc_compute_clock = armada510_crtc_compute_clock,
};
drivers/gpu/drm/armada/armada_crtc.c (new file, 1098 lines) — diff suppressed because it is too large
drivers/gpu/drm/armada/armada_crtc.h (new file, 83 lines)
@@ -0,0 +1,83 @@
/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef ARMADA_CRTC_H
#define ARMADA_CRTC_H

struct armada_gem_object;

struct armada_regs {
	uint32_t offset;
	uint32_t mask;
	uint32_t val;
};

#define armada_reg_queue_mod(_r, _i, _v, _m, _o) \
	do { \
		struct armada_regs *__reg = _r; \
		__reg[_i].offset = _o; \
		__reg[_i].mask = ~(_m); \
		__reg[_i].val = _v; \
		_i++; \
	} while (0)

#define armada_reg_queue_set(_r, _i, _v, _o) \
	armada_reg_queue_mod(_r, _i, _v, ~0, _o)

#define armada_reg_queue_end(_r, _i) \
	armada_reg_queue_mod(_r, _i, 0, 0, ~0)

struct armada_frame_work;

struct armada_crtc {
	struct drm_crtc crtc;
	unsigned num;
	void __iomem *base;
	struct clk *clk;
	struct {
		uint32_t spu_v_h_total;
		uint32_t spu_v_porch;
		uint32_t spu_adv_reg;
	} v[2];
	bool interlaced;
	bool cursor_update;
	uint8_t csc_yuv_mode;
	uint8_t csc_rgb_mode;

	struct drm_plane *plane;

	struct armada_gem_object *cursor_obj;
	int cursor_x;
	int cursor_y;
	uint32_t cursor_hw_pos;
	uint32_t cursor_hw_sz;
	uint32_t cursor_w;
	uint32_t cursor_h;

	int dpms;
	uint32_t cfg_dumb_ctrl;
	uint32_t dumb_ctrl;
	uint32_t spu_iopad_ctrl;

	wait_queue_head_t frame_wait;
	struct armada_frame_work *frame_work;

	spinlock_t irq_lock;
	uint32_t irq_ena;
	struct list_head vbl_list;
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)

int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
void armada_drm_crtc_irq(struct armada_crtc *, u32);
void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);

#endif
drivers/gpu/drm/armada/armada_debugfs.c (new file, 177 lines)
@ -0,0 +1,177 @@
|
||||
/*
|
||||
* Copyright (C) 2012 Russell King
|
||||
* Rewritten from the dovefb driver, and Armada510 manuals.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "armada_crtc.h"
|
||||
#include "armada_drm.h"
|
||||
|
||||
static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
ret = drm_mm_dump_table(m, &priv->linear);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int armada_debugfs_reg_show(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_device *dev = m->private;
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
int n, i;
|
||||
|
||||
if (priv) {
|
||||
for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
|
||||
struct armada_crtc *dcrtc = priv->dcrtc[n];
|
||||
if (!dcrtc)
|
||||
continue;
|
||||
|
||||
for (i = 0x84; i <= 0x1c4; i += 4) {
|
||||
uint32_t v = readl_relaxed(dcrtc->base + i);
|
||||
seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, armada_debugfs_reg_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations fops_reg_r = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = armada_debugfs_reg_r_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static int armada_debugfs_write(struct file *file, const char __user *ptr,
|
||||
size_t len, loff_t *off)
|
||||
{
|
||||
struct drm_device *dev = file->private_data;
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
struct armada_crtc *dcrtc = priv->dcrtc[0];
|
||||
char buf[32], *p;
|
||||
uint32_t reg, val;
|
||||
int ret;
|
||||
|
||||
if (*off != 0)
|
||||
return 0;
|
||||
|
||||
if (len > sizeof(buf) - 1)
|
||||
len = sizeof(buf) - 1;
|
||||
|
||||
ret = strncpy_from_user(buf, ptr, len);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
buf[len] = '\0';
|
||||
|
||||
reg = simple_strtoul(buf, &p, 16);
|
||||
if (!isspace(*p))
|
||||
return -EINVAL;
|
||||
val = simple_strtoul(p + 1, NULL, 16);
|
||||
|
||||
if (reg >= 0x84 && reg <= 0x1c4)
|
||||
writel(val, dcrtc->base + reg);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static const struct file_operations fops_reg_w = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = simple_open,
|
||||
.write = armada_debugfs_write,
|
||||
.llseek = noop_llseek,
|
||||
};
|
||||
|
||||
static struct drm_info_list armada_debugfs_list[] = {
|
||||
{ "gem_linear", armada_debugfs_gem_linear_show, 0 },
|
||||
};
|
||||
#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
|
||||
|
||||
static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
|
||||
const void *key)
|
||||
{
|
||||
struct drm_info_node *node;
|
||||
|
||||
node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
|
||||
if (node == NULL) {
|
||||
debugfs_remove(ent);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
node->minor = minor;
|
||||
node->dent = ent;
|
||||
node->info_ent = (void *) key;
|
||||
|
||||
mutex_lock(&minor->debugfs_lock);
|
||||
list_add(&node->list, &minor->debugfs_list);
|
||||
mutex_unlock(&minor->debugfs_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor,
|
||||
const char *name, umode_t mode, const struct file_operations *fops)
|
||||
{
|
||||
struct dentry *de;
|
||||
|
||||
de = debugfs_create_file(name, mode, root, minor->dev, fops);
|
||||
|
||||
return drm_add_fake_info_node(minor, de, fops);
|
||||
}
|
||||
|
||||
int armada_drm_debugfs_init(struct drm_minor *minor)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = drm_debugfs_create_files(armada_debugfs_list,
|
||||
ARMADA_DEBUGFS_ENTRIES,
|
||||
minor->debugfs_root, minor);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = armada_debugfs_create(minor->debugfs_root, minor,
|
||||
"reg", S_IFREG | S_IRUSR, &fops_reg_r);
|
||||
if (ret)
|
||||
goto err_1;
|
||||
|
||||
ret = armada_debugfs_create(minor->debugfs_root, minor,
|
||||
"reg_wr", S_IFREG | S_IWUSR, &fops_reg_w);
|
||||
if (ret)
|
||||
goto err_2;
|
||||
return ret;
|
||||
|
||||
err_2:
|
||||
drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
|
||||
err_1:
|
||||
drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
|
||||
minor);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void armada_drm_debugfs_cleanup(struct drm_minor *minor)
|
||||
{
|
||||
drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor);
|
||||
drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
|
||||
drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
|
||||
minor);
|
||||
}
|
drivers/gpu/drm/armada/armada_drm.h (new file, 113 lines)
@ -0,0 +1,113 @@
|
||||
/*
|
||||
* Copyright (C) 2012 Russell King
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef ARMADA_DRM_H
|
||||
#define ARMADA_DRM_H
|
||||
|
||||
#include <linux/kfifo.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <drm/drmP.h>
|
||||
|
||||
struct armada_crtc;
|
||||
struct armada_gem_object;
|
||||
struct clk;
|
||||
struct drm_fb_helper;
|
||||
|
||||
static inline void
|
||||
armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
|
||||
{
|
||||
uint32_t ov, v;
|
||||
|
||||
ov = v = readl_relaxed(ptr);
|
||||
v = (v & ~mask) | val;
|
||||
if (ov != v)
|
||||
writel_relaxed(v, ptr);
|
||||
}
|
||||
|
||||
static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
|
||||
{
|
||||
uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;
|
||||
|
||||
/* 88AP510 spec recommends pitch be a multiple of 128 */
|
||||
return ALIGN(pitch, 128);
|
||||
}
|
||||
|
||||
struct armada_vbl_event {
|
||||
struct list_head node;
|
||||
void *data;
|
||||
void (*fn)(struct armada_crtc *, void *);
|
||||
};
|
||||
void armada_drm_vbl_event_add(struct armada_crtc *,
|
||||
struct armada_vbl_event *);
|
||||
void armada_drm_vbl_event_remove(struct armada_crtc *,
|
||||
struct armada_vbl_event *);
|
||||
void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
|
||||
struct armada_vbl_event *);
|
||||
#define armada_drm_vbl_event_init(_e, _f, _d) do { \
|
||||
struct armada_vbl_event *__e = _e; \
|
||||
INIT_LIST_HEAD(&__e->node); \
|
||||
__e->data = _d; \
|
||||
__e->fn = _f; \
|
||||
} while (0)
|
||||
|
||||
|
||||
struct armada_private;
|
||||
|
||||
struct armada_variant {
|
||||
bool has_spu_adv_reg;
|
||||
uint32_t spu_adv_reg;
|
||||
int (*init)(struct armada_private *, struct device *);
|
||||
int (*crtc_init)(struct armada_crtc *);
|
||||
int (*crtc_compute_clock)(struct armada_crtc *,
|
||||
const struct drm_display_mode *,
|
||||
uint32_t *);
|
||||
};
|
||||
|
||||
/* Variant ops */
|
||||
extern const struct armada_variant armada510_ops;
|
||||
|
||||
struct armada_private {
|
||||
const struct armada_variant *variant;
|
||||
struct work_struct fb_unref_work;
|
||||
DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
|
||||
struct drm_fb_helper *fbdev;
|
||||
struct armada_crtc *dcrtc[2];
|
||||
struct drm_mm linear;
|
||||
struct clk *extclk[2];
|
||||
struct drm_property *csc_yuv_prop;
|
||||
struct drm_property *csc_rgb_prop;
|
||||
struct drm_property *colorkey_prop;
|
||||
struct drm_property *colorkey_min_prop;
|
||||
struct drm_property *colorkey_max_prop;
|
||||
struct drm_property *colorkey_val_prop;
|
||||
struct drm_property *colorkey_alpha_prop;
|
||||
struct drm_property *colorkey_mode_prop;
|
||||
struct drm_property *brightness_prop;
|
||||
struct drm_property *contrast_prop;
|
||||
struct drm_property *saturation_prop;
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
struct dentry *de;
|
||||
#endif
|
||||
};
|
||||
|
||||
void __armada_drm_queue_unref_work(struct drm_device *,
|
||||
struct drm_framebuffer *);
|
||||
void armada_drm_queue_unref_work(struct drm_device *,
|
||||
struct drm_framebuffer *);
|
||||
|
||||
extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
|
||||
|
||||
int armada_fbdev_init(struct drm_device *);
|
||||
void armada_fbdev_fini(struct drm_device *);
|
||||
|
||||
int armada_overlay_plane_create(struct drm_device *, unsigned long);
|
||||
|
||||
int armada_drm_debugfs_init(struct drm_minor *);
|
||||
void armada_drm_debugfs_cleanup(struct drm_minor *);
|
||||
|
||||
#endif
|
drivers/gpu/drm/armada/armada_drv.c (new file, 421 lines)
@ -0,0 +1,421 @@
|
||||
/*
|
||||
* Copyright (C) 2012 Russell King
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#include <linux/clk.h>
|
||||
#include <linux/module.h>
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include "armada_crtc.h"
|
||||
#include "armada_drm.h"
|
||||
#include "armada_gem.h"
|
||||
#include "armada_hw.h"
|
||||
#include <drm/armada_drm.h>
|
||||
#include "armada_ioctlP.h"
|
||||
|
||||
#ifdef CONFIG_DRM_ARMADA_TDA1998X
|
||||
#include <drm/i2c/tda998x.h>
|
||||
#include "armada_slave.h"
|
||||
|
||||
static struct tda998x_encoder_params params = {
|
||||
/* With 0x24, there is no translation between vp_out and int_vp
|
||||
FB LCD out Pins VIP Int Vp
|
||||
R:23:16 R:7:0 VPC7:0 7:0 7:0[R]
|
||||
G:15:8 G:15:8 VPB7:0 23:16 23:16[G]
|
||||
B:7:0 B:23:16 VPA7:0 15:8 15:8[B]
|
||||
*/
|
||||
.swap_a = 2,
|
||||
.swap_b = 3,
|
||||
.swap_c = 4,
|
||||
.swap_d = 5,
|
||||
.swap_e = 0,
|
||||
.swap_f = 1,
|
||||
.audio_cfg = BIT(2),
|
||||
.audio_frame[1] = 1,
|
||||
.audio_format = AFMT_SPDIF,
|
||||
.audio_sample_rate = 44100,
|
||||
};
|
||||
|
||||
static const struct armada_drm_slave_config tda19988_config = {
|
||||
.i2c_adapter_id = 0,
|
||||
.crtcs = 1 << 0, /* Only LCD0 at the moment */
|
||||
.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
|
||||
.interlace_allowed = true,
|
||||
.info = {
|
||||
.type = "tda998x",
|
||||
.addr = 0x70,
|
||||
.platform_data = &params,
|
||||
},
|
||||
};
|
||||
#endif
|
||||
|
||||
static void armada_drm_unref_work(struct work_struct *work)
|
||||
{
|
||||
struct armada_private *priv =
|
||||
container_of(work, struct armada_private, fb_unref_work);
|
||||
struct drm_framebuffer *fb;
|
||||
|
||||
while (kfifo_get(&priv->fb_unref, &fb))
|
||||
drm_framebuffer_unreference(fb);
|
||||
}
|
||||
|
||||
/* Must be called with dev->event_lock held */
|
||||
void __armada_drm_queue_unref_work(struct drm_device *dev,
|
||||
struct drm_framebuffer *fb)
|
||||
{
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
|
||||
/*
|
||||
* Yes, we really must jump through these hoops just to store a
|
||||
* _pointer_ to something into the kfifo. This is utterly insane
|
||||
* and idiotic, because it kfifo requires the _data_ pointed to by
|
||||
* the pointer const, not the pointer itself. Not only that, but
|
||||
* you have to pass a pointer _to_ the pointer you want stored.
|
||||
*/
|
||||
const struct drm_framebuffer *silly_api_alert = fb;
|
||||
WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
|
||||
schedule_work(&priv->fb_unref_work);
|
||||
}
|
||||
|
||||
void armada_drm_queue_unref_work(struct drm_device *dev,
|
||||
struct drm_framebuffer *fb)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->event_lock, flags);
|
||||
__armada_drm_queue_unref_work(dev, fb);
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
}
|
||||
|
||||
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
const struct platform_device_id *id;
|
||||
struct armada_private *priv;
|
||||
struct resource *res[ARRAY_SIZE(priv->dcrtc)];
|
||||
struct resource *mem = NULL;
|
||||
int ret, n, i;
|
||||
|
||||
memset(res, 0, sizeof(res));
|
||||
|
||||
for (n = i = 0; ; n++) {
|
||||
struct resource *r = platform_get_resource(dev->platformdev,
|
||||
IORESOURCE_MEM, n);
|
||||
if (!r)
|
||||
break;
|
||||
|
||||
/* Resources above 64K are graphics memory */
|
||||
if (resource_size(r) > SZ_64K)
|
||||
mem = r;
|
||||
else if (i < ARRAY_SIZE(priv->dcrtc))
|
||||
res[i++] = r;
|
||||
else
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!res[0] || !mem)
|
||||
return -ENXIO;
|
||||
|
||||
if (!devm_request_mem_region(dev->dev, mem->start,
|
||||
resource_size(mem), "armada-drm"))
|
||||
return -EBUSY;
|
||||
|
||||
priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv) {
|
||||
DRM_ERROR("failed to allocate private\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dev->dev_private = priv;
|
||||
|
||||
/* Get the implementation specific driver data. */
|
||||
id = platform_get_device_id(dev->platformdev);
|
||||
if (!id)
|
||||
return -ENXIO;
|
||||
|
||||
priv->variant = (struct armada_variant *)id->driver_data;
|
||||
|
||||
ret = priv->variant->init(priv, dev->dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
|
||||
INIT_KFIFO(priv->fb_unref);
|
||||
|
||||
/* Mode setting support */
|
||||
drm_mode_config_init(dev);
|
||||
dev->mode_config.min_width = 320;
|
||||
dev->mode_config.min_height = 200;
|
||||
|
||||
/*
|
||||
* With vscale enabled, the maximum width is 1920 due to the
|
||||
* 1920 by 3 lines RAM
|
||||
*/
|
||||
dev->mode_config.max_width = 1920;
|
||||
dev->mode_config.max_height = 2048;
|
||||
|
||||
dev->mode_config.preferred_depth = 24;
|
||||
dev->mode_config.funcs = &armada_drm_mode_config_funcs;
|
||||
drm_mm_init(&priv->linear, mem->start, resource_size(mem));
|
||||
|
||||
/* Create all LCD controllers */
|
||||
for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
|
||||
if (!res[n])
|
||||
break;
|
||||
|
||||
ret = armada_drm_crtc_create(dev, n, res[n]);
|
||||
if (ret)
|
||||
goto err_kms;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DRM_ARMADA_TDA1998X
|
||||
ret = armada_drm_connector_slave_create(dev, &tda19988_config);
|
||||
if (ret)
|
||||
goto err_kms;
|
||||
#endif
|
||||
|
||||
ret = drm_vblank_init(dev, n);
|
||||
if (ret)
|
||||
goto err_kms;
|
||||
|
||||
ret = drm_irq_install(dev);
|
||||
if (ret)
|
||||
goto err_kms;
|
||||
|
||||
dev->vblank_disable_allowed = 1;
|
||||
|
||||
ret = armada_fbdev_init(dev);
|
||||
if (ret)
|
||||
goto err_irq;
|
||||
|
||||
drm_kms_helper_poll_init(dev);
|
||||
|
||||
return 0;
|
||||
|
||||
err_irq:
|
||||
drm_irq_uninstall(dev);
|
||||
err_kms:
|
||||
drm_mode_config_cleanup(dev);
|
||||
drm_mm_takedown(&priv->linear);
|
||||
flush_work(&priv->fb_unref_work);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int armada_drm_unload(struct drm_device *dev)
|
||||
{
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
|
||||
drm_kms_helper_poll_fini(dev);
|
||||
armada_fbdev_fini(dev);
|
||||
drm_irq_uninstall(dev);
|
||||
drm_mode_config_cleanup(dev);
|
||||
drm_mm_takedown(&priv->linear);
|
||||
flush_work(&priv->fb_unref_work);
|
||||
dev->dev_private = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
|
||||
struct armada_vbl_event *evt)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dcrtc->irq_lock, flags);
|
||||
if (list_empty(&evt->node)) {
|
||||
list_add_tail(&evt->node, &dcrtc->vbl_list);
|
||||
|
||||
drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
|
||||
}
|
||||
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
|
||||
}
|
||||
|
||||
void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
|
||||
struct armada_vbl_event *evt)
|
||||
{
|
||||
if (!list_empty(&evt->node)) {
|
||||
list_del_init(&evt->node);
|
||||
drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
|
||||
}
|
||||
}
|
||||
|
||||
void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc,
|
||||
struct armada_vbl_event *evt)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dcrtc->irq_lock, flags);
|
||||
armada_drm_vbl_event_remove(dcrtc, evt);
|
||||
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
|
||||
}
|
||||
|
||||
/* These are called under the vbl_lock. */
|
||||
static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
|
||||
{
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
|
||||
{
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
|
||||
}
|
||||
|
||||
static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = arg;
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
struct armada_crtc *dcrtc = priv->dcrtc[0];
|
||||
uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
|
||||
irqreturn_t handled = IRQ_NONE;
|
||||
|
||||
/*
|
||||
* This is rediculous - rather than writing bits to clear, we
|
||||
* have to set the actual status register value. This is racy.
|
||||
*/
|
||||
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
|
||||
|
||||
/* Mask out those interrupts we haven't enabled */
|
||||
v = stat & dcrtc->irq_ena;
|
||||
|
||||
if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
|
||||
armada_drm_crtc_irq(dcrtc, stat);
|
||||
handled = IRQ_HANDLED;
|
||||
}
|
||||
|
||||
return handled;
|
||||
}
|
||||
|
||||
static int armada_drm_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
struct armada_crtc *dcrtc = priv->dcrtc[0];
|
||||
|
||||
spin_lock_irq(&dev->vbl_lock);
|
||||
writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
|
||||
writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
|
||||
spin_unlock_irq(&dev->vbl_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void armada_drm_irq_uninstall(struct drm_device *dev)
|
||||
{
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
struct armada_crtc *dcrtc = priv->dcrtc[0];
|
||||
|
||||
writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
|
||||
}
|
||||
|
||||
static struct drm_ioctl_desc armada_ioctls[] = {
|
||||
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
|
||||
DRM_UNLOCKED),
|
||||
DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
|
||||
DRM_UNLOCKED),
|
||||
DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
|
||||
DRM_UNLOCKED),
|
||||
};
|
||||
|
||||
static const struct file_operations armada_drm_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = no_llseek,
|
||||
.read = drm_read,
|
||||
.poll = drm_poll,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_gem_mmap,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
};
|
||||
|
||||
static struct drm_driver armada_drm_driver = {
|
||||
.load = armada_drm_load,
|
||||
.open = NULL,
|
||||
.preclose = NULL,
|
||||
.postclose = NULL,
|
||||
.lastclose = NULL,
|
||||
.unload = armada_drm_unload,
|
||||
.get_vblank_counter = drm_vblank_count,
|
||||
.enable_vblank = armada_drm_enable_vblank,
|
||||
.disable_vblank = armada_drm_disable_vblank,
|
||||
.irq_handler = armada_drm_irq_handler,
|
||||
.irq_postinstall = armada_drm_irq_postinstall,
|
||||
.irq_uninstall = armada_drm_irq_uninstall,
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
.debugfs_init = armada_drm_debugfs_init,
|
||||
.debugfs_cleanup = armada_drm_debugfs_cleanup,
|
||||
#endif
|
||||
.gem_free_object = armada_gem_free_object,
|
||||
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
|
||||
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
|
||||
.gem_prime_export = armada_gem_prime_export,
|
||||
.gem_prime_import = armada_gem_prime_import,
|
||||
.dumb_create = armada_gem_dumb_create,
|
||||
.dumb_map_offset = armada_gem_dumb_map_offset,
|
||||
.dumb_destroy = armada_gem_dumb_destroy,
|
||||
.gem_vm_ops = &armada_gem_vm_ops,
|
||||
.major = 1,
|
||||
.minor = 0,
|
||||
.name = "armada-drm",
|
||||
.desc = "Armada SoC DRM",
|
||||
.date = "20120730",
|
||||
.driver_features = DRIVER_GEM | DRIVER_MODESET |
|
||||
DRIVER_HAVE_IRQ | DRIVER_PRIME,
|
||||
.ioctls = armada_ioctls,
|
||||
.fops = &armada_drm_fops,
|
||||
};
|
||||
|
||||
static int armada_drm_probe(struct platform_device *pdev)
|
||||
{
|
||||
return drm_platform_init(&armada_drm_driver, pdev);
|
||||
}
|
||||
|
||||
static int armada_drm_remove(struct platform_device *pdev)
|
||||
{
|
||||
drm_platform_exit(&armada_drm_driver, pdev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct platform_device_id armada_drm_platform_ids[] = {
|
||||
{
|
||||
.name = "armada-drm",
|
||||
.driver_data = (unsigned long)&armada510_ops,
|
||||
}, {
|
||||
.name = "armada-510-drm",
|
||||
.driver_data = (unsigned long)&armada510_ops,
|
||||
},
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
|
||||
|
||||
static struct platform_driver armada_drm_platform_driver = {
|
||||
.probe = armada_drm_probe,
|
||||
.remove = armada_drm_remove,
|
||||
.driver = {
|
||||
.name = "armada-drm",
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
.id_table = armada_drm_platform_ids,
|
||||
};
|
||||
|
||||
static int __init armada_drm_init(void)
|
||||
{
|
||||
armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
|
||||
return platform_driver_register(&armada_drm_platform_driver);
|
||||
}
|
||||
module_init(armada_drm_init);
|
||||
|
||||
static void __exit armada_drm_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&armada_drm_platform_driver);
|
||||
}
|
||||
module_exit(armada_drm_exit);
|
||||
|
||||
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
|
||||
MODULE_DESCRIPTION("Armada DRM Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS("platform:armada-drm");
|
drivers/gpu/drm/armada/armada_fb.c (new file, 170 lines)
@ -0,0 +1,170 @@
|
||||
/*
|
||||
* Copyright (C) 2012 Russell King
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include "armada_drm.h"
|
||||
#include "armada_fb.h"
|
||||
#include "armada_gem.h"
|
||||
#include "armada_hw.h"
|
||||
|
||||
static void armada_fb_destroy(struct drm_framebuffer *fb)
|
||||
{
|
||||
struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
|
||||
|
||||
drm_framebuffer_cleanup(&dfb->fb);
|
||||
drm_gem_object_unreference_unlocked(&dfb->obj->obj);
|
||||
kfree(dfb);
|
||||
}
|
||||
|
||||
static int armada_fb_create_handle(struct drm_framebuffer *fb,
|
||||
struct drm_file *dfile, unsigned int *handle)
|
||||
{
|
||||
struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
|
||||
return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
|
||||
}
|
||||
|
||||
static const struct drm_framebuffer_funcs armada_fb_funcs = {
|
||||
.destroy = armada_fb_destroy,
|
||||
.create_handle = armada_fb_create_handle,
|
||||
};
|
||||
|
||||
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
|
||||
struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
|
||||
{
|
||||
struct armada_framebuffer *dfb;
|
||||
uint8_t format, config;
|
||||
int ret;
|
||||
|
||||
switch (mode->pixel_format) {
|
||||
#define FMT(drm, fmt, mod) \
|
||||
case DRM_FORMAT_##drm: \
|
||||
format = CFG_##fmt; \
|
||||
config = mod; \
|
||||
break
|
||||
FMT(RGB565, 565, CFG_SWAPRB);
|
||||
FMT(BGR565, 565, 0);
|
||||
FMT(ARGB1555, 1555, CFG_SWAPRB);
|
||||
FMT(ABGR1555, 1555, 0);
|
||||
FMT(RGB888, 888PACK, CFG_SWAPRB);
|
||||
FMT(BGR888, 888PACK, 0);
|
||||
FMT(XRGB8888, X888, CFG_SWAPRB);
|
||||
FMT(XBGR8888, X888, 0);
|
||||
FMT(ARGB8888, 8888, CFG_SWAPRB);
|
||||
FMT(ABGR8888, 8888, 0);
|
||||
FMT(YUYV, 422PACK, CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
|
||||
FMT(UYVY, 422PACK, CFG_YUV2RGB);
|
||||
FMT(VYUY, 422PACK, CFG_YUV2RGB | CFG_SWAPUV);
|
||||
FMT(YVYU, 422PACK, CFG_YUV2RGB | CFG_SWAPYU);
|
||||
FMT(YUV422, 422, CFG_YUV2RGB);
|
||||
FMT(YVU422, 422, CFG_YUV2RGB | CFG_SWAPUV);
|
||||
FMT(YUV420, 420, CFG_YUV2RGB);
|
||||
FMT(YVU420, 420, CFG_YUV2RGB | CFG_SWAPUV);
|
||||
FMT(C8, PSEUDO8, 0);
|
||||
#undef FMT
|
||||
default:
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
|
||||
if (!dfb) {
|
||||
DRM_ERROR("failed to allocate Armada fb object\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
dfb->fmt = format;
|
||||
dfb->mod = config;
|
||||
dfb->obj = obj;
|
||||
|
||||
drm_helper_mode_fill_fb_struct(&dfb->fb, mode);
|
||||
|
||||
ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
|
||||
if (ret) {
|
||||
kfree(dfb);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
/*
|
||||
* Take a reference on our object as we're successful - the
|
||||
* caller already holds a reference, which keeps us safe for
|
||||
* the above call, but the caller will drop their reference
|
||||
* to it. Hence we need to take our own reference.
|
||||
*/
|
||||
drm_gem_object_reference(&obj->obj);
|
||||
|
||||
return dfb;
|
||||
}
|
||||
|
||||
static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
|
||||
struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode)
|
||||
{
|
||||
struct armada_gem_object *obj;
|
||||
struct armada_framebuffer *dfb;
|
||||
int ret;
|
||||
|
||||
DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
|
||||
mode->width, mode->height, mode->pixel_format,
|
||||
mode->flags, mode->pitches[0], mode->pitches[1],
|
||||
mode->pitches[2]);
|
||||
|
||||
/* We can only handle a single plane at the moment */
|
||||
if (drm_format_num_planes(mode->pixel_format) > 1 &&
|
||||
(mode->handles[0] != mode->handles[1] ||
|
||||
mode->handles[0] != mode->handles[2])) {
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
|
||||
if (!obj) {
|
||||
ret = -ENOENT;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (obj->obj.import_attach && !obj->sgt) {
|
||||
ret = armada_gem_map_import(obj);
|
||||
if (ret)
|
||||
goto err_unref;
|
||||
}
|
||||
|
||||
/* Framebuffer objects must have a valid device address for scanout */
|
||||
if (obj->dev_addr == DMA_ERROR_CODE) {
|
||||
ret = -EINVAL;
|
||||
goto err_unref;
|
||||
}
|
||||
|
||||
dfb = armada_framebuffer_create(dev, mode, obj);
|
||||
if (IS_ERR(dfb)) {
|
||||
ret = PTR_ERR(dfb);
|
||||
goto err;
|
||||
}
|
||||
|
||||
drm_gem_object_unreference_unlocked(&obj->obj);
|
||||
|
||||
return &dfb->fb;
|
||||
|
||||
err_unref:
|
||||
drm_gem_object_unreference_unlocked(&obj->obj);
|
||||
err:
|
||||
DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static void armada_output_poll_changed(struct drm_device *dev)
|
||||
{
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
struct drm_fb_helper *fbh = priv->fbdev;
|
||||
|
||||
if (fbh)
|
||||
drm_fb_helper_hotplug_event(fbh);
|
||||
}
|
||||
|
||||
const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
|
||||
.fb_create = armada_fb_create,
|
||||
.output_poll_changed = armada_output_poll_changed,
|
||||
};
|
drivers/gpu/drm/armada/armada_fb.h (new file, 24 lines)
@@ -0,0 +1,24 @@
/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef ARMADA_FB_H
#define ARMADA_FB_H

struct armada_framebuffer {
	struct drm_framebuffer fb;
	struct armada_gem_object *obj;
	uint8_t fmt;
	uint8_t mod;
};
#define drm_fb_to_armada_fb(dfb) \
	container_of(dfb, struct armada_framebuffer, fb)
#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj

struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
	struct drm_mode_fb_cmd2 *, struct armada_gem_object *);

#endif
drivers/gpu/drm/armada/armada_fbdev.c (new file, 202 lines)
@ -0,0 +1,202 @@
|
||||
/*
|
||||
* Copyright (C) 2012 Russell King
|
||||
* Written from the i915 driver.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#include <linux/errno.h>
|
||||
#include <linux/fb.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include "armada_crtc.h"
|
||||
#include "armada_drm.h"
|
||||
#include "armada_fb.h"
|
||||
#include "armada_gem.h"
|
||||
|
||||
static /*const*/ struct fb_ops armada_fb_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.fb_check_var = drm_fb_helper_check_var,
|
||||
.fb_set_par = drm_fb_helper_set_par,
|
||||
.fb_fillrect = cfb_fillrect,
|
||||
.fb_copyarea = cfb_copyarea,
|
||||
.fb_imageblit = cfb_imageblit,
|
||||
.fb_pan_display = drm_fb_helper_pan_display,
|
||||
.fb_blank = drm_fb_helper_blank,
|
||||
.fb_setcmap = drm_fb_helper_setcmap,
|
||||
.fb_debug_enter = drm_fb_helper_debug_enter,
|
||||
.fb_debug_leave = drm_fb_helper_debug_leave,
|
||||
};
|
||||
|
||||
static int armada_fb_create(struct drm_fb_helper *fbh,
|
||||
struct drm_fb_helper_surface_size *sizes)
|
||||
{
|
||||
struct drm_device *dev = fbh->dev;
|
||||
struct drm_mode_fb_cmd2 mode;
|
||||
struct armada_framebuffer *dfb;
|
||||
struct armada_gem_object *obj;
|
||||
struct fb_info *info;
|
||||
int size, ret;
|
||||
void *ptr;
|
||||
|
||||
memset(&mode, 0, sizeof(mode));
|
||||
mode.width = sizes->surface_width;
|
||||
mode.height = sizes->surface_height;
|
||||
mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
|
||||
mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
|
||||
sizes->surface_depth);
|
||||
|
||||
size = mode.pitches[0] * mode.height;
|
||||
obj = armada_gem_alloc_private_object(dev, size);
|
||||
if (!obj) {
|
||||
DRM_ERROR("failed to allocate fb memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = armada_gem_linear_back(dev, obj);
|
||||
if (ret) {
|
||||
drm_gem_object_unreference_unlocked(&obj->obj);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ptr = armada_gem_map_object(dev, obj);
|
||||
if (!ptr) {
|
||||
drm_gem_object_unreference_unlocked(&obj->obj);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dfb = armada_framebuffer_create(dev, &mode, obj);
|
||||
|
||||
/*
|
||||
* A reference is now held by the framebuffer object if
|
||||
* successful, otherwise this drops the ref for the error path.
|
||||
*/
|
||||
drm_gem_object_unreference_unlocked(&obj->obj);
|
||||
|
||||
if (IS_ERR(dfb))
|
||||
return PTR_ERR(dfb);
|
||||
|
||||
info = framebuffer_alloc(0, dev->dev);
|
||||
if (!info) {
|
||||
ret = -ENOMEM;
|
||||
goto err_fballoc;
|
||||
}
|
||||
|
||||
ret = fb_alloc_cmap(&info->cmap, 256, 0);
|
||||
if (ret) {
|
||||
ret = -ENOMEM;
|
||||
goto err_fbcmap;
|
||||
}
|
||||
|
||||
strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
|
||||
info->par = fbh;
|
||||
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
|
||||
info->fbops = &armada_fb_ops;
|
||||
info->fix.smem_start = obj->phys_addr;
|
||||
info->fix.smem_len = obj->obj.size;
|
||||
info->screen_size = obj->obj.size;
|
||||
info->screen_base = ptr;
|
||||
fbh->fb = &dfb->fb;
|
||||
fbh->fbdev = info;
|
||||
drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
|
||||
drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
|
||||
|
||||
DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
|
||||
dfb->fb.width, dfb->fb.height,
|
||||
dfb->fb.bits_per_pixel, obj->phys_addr);
|
||||
|
||||
return 0;
|
||||
|
||||
err_fbcmap:
|
||||
framebuffer_release(info);
|
||||
err_fballoc:
|
||||
dfb->fb.funcs->destroy(&dfb->fb);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int armada_fb_probe(struct drm_fb_helper *fbh,
|
||||
struct drm_fb_helper_surface_size *sizes)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!fbh->fb) {
|
||||
ret = armada_fb_create(fbh, sizes);
|
||||
if (ret == 0)
|
||||
ret = 1;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
|
||||
.gamma_set = armada_drm_crtc_gamma_set,
|
||||
.gamma_get = armada_drm_crtc_gamma_get,
|
||||
.fb_probe = armada_fb_probe,
|
||||
};
|
||||
|
||||
int armada_fbdev_init(struct drm_device *dev)
|
||||
{
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
struct drm_fb_helper *fbh;
|
||||
int ret;
|
||||
|
||||
fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
|
||||
if (!fbh)
|
||||
return -ENOMEM;
|
||||
|
||||
priv->fbdev = fbh;
|
||||
|
||||
fbh->funcs = &armada_fb_helper_funcs;
|
||||
|
||||
ret = drm_fb_helper_init(dev, fbh, 1, 1);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to initialize drm fb helper\n");
|
||||
goto err_fb_helper;
|
||||
}
|
||||
|
||||
ret = drm_fb_helper_single_add_all_connectors(fbh);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to add fb connectors\n");
|
||||
goto err_fb_setup;
|
||||
}
|
||||
|
||||
ret = drm_fb_helper_initial_config(fbh, 32);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to set initial config\n");
|
||||
goto err_fb_setup;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err_fb_setup:
|
||||
drm_fb_helper_fini(fbh);
|
||||
err_fb_helper:
|
||||
priv->fbdev = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void armada_fbdev_fini(struct drm_device *dev)
|
||||
{
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
struct drm_fb_helper *fbh = priv->fbdev;
|
||||
|
||||
if (fbh) {
|
||||
struct fb_info *info = fbh->fbdev;
|
||||
|
||||
if (info) {
|
||||
unregister_framebuffer(info);
|
||||
if (info->cmap.len)
|
||||
fb_dealloc_cmap(&info->cmap);
|
||||
framebuffer_release(info);
|
||||
}
|
||||
|
||||
if (fbh->fb)
|
||||
fbh->fb->funcs->destroy(fbh->fb);
|
||||
|
||||
drm_fb_helper_fini(fbh);
|
||||
|
||||
priv->fbdev = NULL;
|
||||
}
|
||||
}
|
drivers/gpu/drm/armada/armada_gem.c (new file, 611 lines)
@ -0,0 +1,611 @@
|
||||
/*
|
||||
* Copyright (C) 2012 Russell King
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/shmem_fs.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "armada_drm.h"
|
||||
#include "armada_gem.h"
|
||||
#include <drm/armada_drm.h>
|
||||
#include "armada_ioctlP.h"
|
||||
|
||||
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
{
|
||||
struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
|
||||
unsigned long addr = (unsigned long)vmf->virtual_address;
|
||||
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
|
||||
int ret;
|
||||
|
||||
pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
|
||||
ret = vm_insert_pfn(vma, addr, pfn);
|
||||
|
||||
switch (ret) {
|
||||
case 0:
|
||||
case -EBUSY:
|
||||
return VM_FAULT_NOPAGE;
|
||||
case -ENOMEM:
|
||||
return VM_FAULT_OOM;
|
||||
default:
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
}
|
||||
|
||||
const struct vm_operations_struct armada_gem_vm_ops = {
|
||||
.fault = armada_gem_vm_fault,
|
||||
.open = drm_gem_vm_open,
|
||||
.close = drm_gem_vm_close,
|
||||
};
|
||||
|
||||
static size_t roundup_gem_size(size_t size)
|
||||
{
|
||||
return roundup(size, PAGE_SIZE);
|
||||
}
|
||||
|
||||
/* dev->struct_mutex is held here */
|
||||
void armada_gem_free_object(struct drm_gem_object *obj)
|
||||
{
|
||||
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
|
||||
|
||||
DRM_DEBUG_DRIVER("release obj %p\n", dobj);
|
||||
|
||||
drm_gem_free_mmap_offset(&dobj->obj);
|
||||
|
||||
if (dobj->page) {
|
||||
/* page backed memory */
|
||||
unsigned int order = get_order(dobj->obj.size);
|
||||
__free_pages(dobj->page, order);
|
||||
} else if (dobj->linear) {
|
||||
/* linear backed memory */
|
||||
drm_mm_remove_node(dobj->linear);
|
||||
kfree(dobj->linear);
|
||||
if (dobj->addr)
|
||||
iounmap(dobj->addr);
|
||||
}
|
||||
|
||||
if (dobj->obj.import_attach) {
|
||||
/* We only ever display imported data */
|
||||
dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
|
||||
DMA_TO_DEVICE);
|
||||
drm_prime_gem_destroy(&dobj->obj, NULL);
|
||||
}
|
||||
|
||||
drm_gem_object_release(&dobj->obj);
|
||||
|
||||
kfree(dobj);
|
||||
}
|
||||
|
||||
int
|
||||
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
|
||||
{
|
||||
struct armada_private *priv = dev->dev_private;
|
||||
size_t size = obj->obj.size;
|
||||
|
||||
if (obj->page || obj->linear)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* If it is a small allocation (typically cursor, which will
|
||||
* be 32x64 or 64x32 ARGB pixels) try to get it from the system.
|
||||
* Framebuffers will never be this small (our minimum size for
|
||||
* framebuffers is larger than this anyway.) Such objects are
|
||||
* only accessed by the CPU so we don't need any special handing
|
||||
* here.
|
||||
*/
|
||||
if (size <= 8192) {
|
||||
unsigned int order = get_order(size);
|
||||
struct page *p = alloc_pages(GFP_KERNEL, order);
|
||||
|
||||
if (p) {
|
||||
obj->addr = page_address(p);
|
||||
obj->phys_addr = page_to_phys(p);
|
||||
obj->page = p;
|
||||
|
||||
memset(obj->addr, 0, PAGE_ALIGN(size));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* We could grab something from CMA if it's enabled, but that
|
||||
* involves building in a problem:
|
||||
*
|
||||
* CMA's interface uses dma_alloc_coherent(), which provides us
|
||||
* with an CPU virtual address and a device address.
|
||||
*
|
||||
* The CPU virtual address may be either an address in the kernel
|
||||
* direct mapped region (for example, as it would be on x86) or
|
||||
* it may be remapped into another part of kernel memory space
|
||||
* (eg, as it would be on ARM.) This means virt_to_phys() on the
|
||||
* returned virtual address is invalid depending on the architecture
|
||||
* implementation.
|
||||
*
|
||||
* The device address may also not be a physical address; it may
|
||||
* be that there is some kind of remapping between the device and
|
||||
* system RAM, which makes the use of the device address also
|
||||
* unsafe to re-use as a physical address.
|
||||
*
|
||||
* This makes DRM usage of dma_alloc_coherent() in a generic way
|
||||
* at best very questionable and unsafe.
|
||||
*/
|
||||
|
||||
/* Otherwise, grab it from our linear allocation */
|
||||
if (!obj->page) {
|
||||
struct drm_mm_node *node;
|
||||
unsigned align = min_t(unsigned, size, SZ_2M);
|
||||
void __iomem *ptr;
|
||||
int ret;
|
||||
|
||||
node = kzalloc(sizeof(*node), GFP_KERNEL);
|
||||
if (!node)
|
||||
return -ENOSPC;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
ret = drm_mm_insert_node(&priv->linear, node, size, align,
|
||||
DRM_MM_SEARCH_DEFAULT);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
if (ret) {
|
||||
kfree(node);
|
||||
return ret;
|
||||
}
|
||||
|
||||
obj->linear = node;
|
||||
|
||||
/* Ensure that the memory we're returning is cleared. */
|
||||
ptr = ioremap_wc(obj->linear->start, size);
|
||||
if (!ptr) {
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
drm_mm_remove_node(obj->linear);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
kfree(obj->linear);
|
||||
obj->linear = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset_io(ptr, 0, size);
|
||||
iounmap(ptr);
|
||||
|
||||
obj->phys_addr = obj->linear->start;
|
||||
obj->dev_addr = obj->linear->start;
|
||||
}
|
||||
|
||||
DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
|
||||
obj, obj->phys_addr, obj->dev_addr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *
|
||||
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
|
||||
{
|
||||
/* only linear objects need to be ioremap'd */
|
||||
if (!dobj->addr && dobj->linear)
|
||||
dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
|
||||
return dobj->addr;
|
||||
}
|
||||
|
||||
struct armada_gem_object *
|
||||
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
|
||||
{
|
||||
struct armada_gem_object *obj;
|
||||
|
||||
size = roundup_gem_size(size);
|
||||
|
||||
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
|
||||
if (!obj)
|
||||
return NULL;
|
||||
|
||||
drm_gem_private_object_init(dev, &obj->obj, size);
|
||||
obj->dev_addr = DMA_ERROR_CODE;
|
||||
|
||||
DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
|
||||
size_t size)
|
||||
{
|
||||
struct armada_gem_object *obj;
|
||||
struct address_space *mapping;
|
||||
|
||||
size = roundup_gem_size(size);
|
||||
|
||||
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
|
||||
if (!obj)
|
||||
return NULL;
|
||||
|
||||
if (drm_gem_object_init(dev, &obj->obj, size)) {
|
||||
kfree(obj);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
obj->dev_addr = DMA_ERROR_CODE;
|
||||
|
||||
mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
|
||||
mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
|
||||
|
||||
DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
/* Dumb alloc support */
|
||||
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args)
|
||||
{
|
||||
struct armada_gem_object *dobj;
|
||||
u32 handle;
|
||||
size_t size;
|
||||
int ret;
|
||||
|
||||
args->pitch = armada_pitch(args->width, args->bpp);
|
||||
args->size = size = args->pitch * args->height;
|
||||
|
||||
dobj = armada_gem_alloc_private_object(dev, size);
if (dobj == NULL)
return -ENOMEM;

ret = armada_gem_linear_back(dev, dobj);
if (ret)
goto err;

ret = drm_gem_handle_create(file, &dobj->obj, &handle);
if (ret)
goto err;

args->handle = handle;

/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
drm_gem_object_unreference_unlocked(&dobj->obj);
return ret;
}

int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
struct armada_gem_object *obj;
int ret = 0;

mutex_lock(&dev->struct_mutex);
obj = armada_gem_object_lookup(dev, file, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
ret = -EINVAL;
goto err_unlock;
}

/* Don't allow imported objects to be mapped */
if (obj->obj.import_attach) {
ret = -EINVAL;
goto err_unlock;
}

ret = drm_gem_create_mmap_offset(&obj->obj);
if (ret == 0) {
*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
}

drm_gem_object_unreference(&obj->obj);
err_unlock:
mutex_unlock(&dev->struct_mutex);

return ret;
}

int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
uint32_t handle)
{
return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_armada_gem_create *args = data;
struct armada_gem_object *dobj;
size_t size;
u32 handle;
int ret;

if (args->size == 0)
return -ENOMEM;

size = args->size;

dobj = armada_gem_alloc_object(dev, size);
if (dobj == NULL)
return -ENOMEM;

ret = drm_gem_handle_create(file, &dobj->obj, &handle);
if (ret)
goto err;

args->handle = handle;

/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
drm_gem_object_unreference_unlocked(&dobj->obj);
return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_armada_gem_mmap *args = data;
struct armada_gem_object *dobj;
unsigned long addr;

dobj = armada_gem_object_lookup(dev, file, args->handle);
if (dobj == NULL)
return -ENOENT;

if (!dobj->obj.filp) {
drm_gem_object_unreference(&dobj->obj);
return -EINVAL;
}

addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
MAP_SHARED, args->offset);
drm_gem_object_unreference(&dobj->obj);
if (IS_ERR_VALUE(addr))
return addr;

args->addr = addr;

return 0;
}

int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_armada_gem_pwrite *args = data;
struct armada_gem_object *dobj;
char __user *ptr;
int ret;

DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
args->handle, args->offset, args->size, args->ptr);

if (args->size == 0)
return 0;

ptr = (char __user *)(uintptr_t)args->ptr;

if (!access_ok(VERIFY_READ, ptr, args->size))
return -EFAULT;

ret = fault_in_multipages_readable(ptr, args->size);
if (ret)
return ret;

dobj = armada_gem_object_lookup(dev, file, args->handle);
if (dobj == NULL)
return -ENOENT;

/* Must be a kernel-mapped object */
if (!dobj->addr)
return -EINVAL;

if (args->offset > dobj->obj.size ||
args->size > dobj->obj.size - args->offset) {
DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
ret = -EINVAL;
goto unref;
}

if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
ret = -EFAULT;
} else if (dobj->update) {
dobj->update(dobj->update_data);
ret = 0;
}

unref:
drm_gem_object_unreference_unlocked(&dobj->obj);
return ret;
}

/* Prime support */
struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
struct scatterlist *sg;
struct sg_table *sgt;
int i, num;

sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return NULL;

if (dobj->obj.filp) {
struct address_space *mapping;
gfp_t gfp;
int count;

count = dobj->obj.size / PAGE_SIZE;
if (sg_alloc_table(sgt, count, GFP_KERNEL))
goto free_sgt;

mapping = file_inode(dobj->obj.filp)->i_mapping;
gfp = mapping_gfp_mask(mapping);

for_each_sg(sgt->sgl, sg, count, i) {
struct page *page;

page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (IS_ERR(page)) {
num = i;
goto release;
}

sg_set_page(sg, page, PAGE_SIZE, 0);
}

if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
num = sgt->nents;
goto release;
}
} else if (dobj->page) {
/* Single contiguous page */
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
goto free_sgt;

sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
goto free_table;
} else if (dobj->linear) {
/* Single contiguous physical region - no struct page */
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
goto free_sgt;
sg_dma_address(sgt->sgl) = dobj->dev_addr;
sg_dma_len(sgt->sgl) = dobj->obj.size;
} else {
goto free_sgt;
}
return sgt;

release:
for_each_sg(sgt->sgl, sg, num, i)
page_cache_release(sg_page(sg));
free_table:
sg_free_table(sgt);
free_sgt:
kfree(sgt);
return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt, enum dma_data_direction dir)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
int i;

if (!dobj->linear)
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

if (dobj->obj.filp) {
struct scatterlist *sg;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
page_cache_release(sg_page(sg));
}

sg_free_table(sgt);
kfree(sgt);
}

static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.kmap_atomic = armada_gem_dmabuf_no_kmap,
.kunmap_atomic = armada_gem_dmabuf_no_kunmap,
.kmap = armada_gem_dmabuf_no_kmap,
.kunmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,
};

struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
int flags)
{
return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
O_RDWR);
}

struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
struct dma_buf_attachment *attach;
struct armada_gem_object *dobj;

if (buf->ops == &armada_gem_prime_dmabuf_ops) {
struct drm_gem_object *obj = buf->priv;
if (obj->dev == dev) {
/*
* Importing our own dmabuf(s) increases the
* refcount on the gem object itself.
*/
drm_gem_object_reference(obj);
dma_buf_put(buf);
return obj;
}
}

attach = dma_buf_attach(buf, dev->dev);
if (IS_ERR(attach))
return ERR_CAST(attach);

dobj = armada_gem_alloc_private_object(dev, buf->size);
if (!dobj) {
dma_buf_detach(buf, attach);
return ERR_PTR(-ENOMEM);
}

dobj->obj.import_attach = attach;

/*
* Don't call dma_buf_map_attachment() here - it maps the
* scatterlist immediately for DMA, and this is not always
* an appropriate thing to do.
*/
return &dobj->obj;
}

int armada_gem_map_import(struct armada_gem_object *dobj)
{
int ret;

dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
DMA_TO_DEVICE);
if (!dobj->sgt) {
DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
return -EINVAL;
}
if (IS_ERR(dobj->sgt)) {
ret = PTR_ERR(dobj->sgt);
dobj->sgt = NULL;
DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
return ret;
}
if (dobj->sgt->nents > 1) {
DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
return -EINVAL;
}
if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
return -EINVAL;
}
dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
return 0;
}
52  drivers/gpu/drm/armada/armada_gem.h  Normal file
@ -0,0 +1,52 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_GEM_H
#define ARMADA_GEM_H

/* GEM */
struct armada_gem_object {
struct drm_gem_object obj;
void *addr;
phys_addr_t phys_addr;
resource_size_t dev_addr;
struct drm_mm_node *linear; /* for linear backed */
struct page *page; /* for page backed */
struct sg_table *sgt; /* for imported */
void (*update)(void *);
void *update_data;
};

extern const struct vm_operations_struct armada_gem_vm_ops;

#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)

void armada_gem_free_object(struct drm_gem_object *);
int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *);
void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *);
struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
size_t);
int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *);
int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
uint32_t, uint64_t *);
int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
uint32_t);
struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
struct dma_buf *);
int armada_gem_map_import(struct armada_gem_object *);

static inline struct armada_gem_object *armada_gem_object_lookup(
struct drm_device *dev, struct drm_file *dfile, unsigned handle)
{
struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);

return obj ? drm_to_armada_gem(obj) : NULL;
}
#endif
318  drivers/gpu/drm/armada/armada_hw.h  Normal file
@ -0,0 +1,318 @@
/*
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_HW_H
#define ARMADA_HW_H

/*
* Note: the following registers are written from IRQ context:
* LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
* LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC,
* LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN,
* LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0
*/
enum {
LCD_SPU_ADV_REG = 0x0084, /* Armada 510 */
LCD_SPU_DMA_START_ADDR_Y0 = 0x00c0,
LCD_SPU_DMA_START_ADDR_U0 = 0x00c4,
LCD_SPU_DMA_START_ADDR_V0 = 0x00c8,
LCD_CFG_DMA_START_ADDR_0 = 0x00cc,
LCD_SPU_DMA_START_ADDR_Y1 = 0x00d0,
LCD_SPU_DMA_START_ADDR_U1 = 0x00d4,
LCD_SPU_DMA_START_ADDR_V1 = 0x00d8,
LCD_CFG_DMA_START_ADDR_1 = 0x00dc,
LCD_SPU_DMA_PITCH_YC = 0x00e0,
LCD_SPU_DMA_PITCH_UV = 0x00e4,
LCD_SPU_DMA_OVSA_HPXL_VLN = 0x00e8,
LCD_SPU_DMA_HPXL_VLN = 0x00ec,
LCD_SPU_DZM_HPXL_VLN = 0x00f0,
LCD_CFG_GRA_START_ADDR0 = 0x00f4,
LCD_CFG_GRA_START_ADDR1 = 0x00f8,
LCD_CFG_GRA_PITCH = 0x00fc,
LCD_SPU_GRA_OVSA_HPXL_VLN = 0x0100,
LCD_SPU_GRA_HPXL_VLN = 0x0104,
LCD_SPU_GZM_HPXL_VLN = 0x0108,
LCD_SPU_HWC_OVSA_HPXL_VLN = 0x010c,
LCD_SPU_HWC_HPXL_VLN = 0x0110,
LCD_SPUT_V_H_TOTAL = 0x0114,
LCD_SPU_V_H_ACTIVE = 0x0118,
LCD_SPU_H_PORCH = 0x011c,
LCD_SPU_V_PORCH = 0x0120,
LCD_SPU_BLANKCOLOR = 0x0124,
LCD_SPU_ALPHA_COLOR1 = 0x0128,
LCD_SPU_ALPHA_COLOR2 = 0x012c,
LCD_SPU_COLORKEY_Y = 0x0130,
LCD_SPU_COLORKEY_U = 0x0134,
LCD_SPU_COLORKEY_V = 0x0138,
LCD_CFG_RDREG4F = 0x013c, /* Armada 510 */
LCD_SPU_SPI_RXDATA = 0x0140,
LCD_SPU_ISA_RXDATA = 0x0144,
LCD_SPU_HWC_RDDAT = 0x0158,
LCD_SPU_GAMMA_RDDAT = 0x015c,
LCD_SPU_PALETTE_RDDAT = 0x0160,
LCD_SPU_IOPAD_IN = 0x0178,
LCD_CFG_RDREG5F = 0x017c,
LCD_SPU_SPI_CTRL = 0x0180,
LCD_SPU_SPI_TXDATA = 0x0184,
LCD_SPU_SMPN_CTRL = 0x0188,
LCD_SPU_DMA_CTRL0 = 0x0190,
LCD_SPU_DMA_CTRL1 = 0x0194,
LCD_SPU_SRAM_CTRL = 0x0198,
LCD_SPU_SRAM_WRDAT = 0x019c,
LCD_SPU_SRAM_PARA0 = 0x01a0, /* Armada 510 */
LCD_SPU_SRAM_PARA1 = 0x01a4,
LCD_CFG_SCLK_DIV = 0x01a8,
LCD_SPU_CONTRAST = 0x01ac,
LCD_SPU_SATURATION = 0x01b0,
LCD_SPU_CBSH_HUE = 0x01b4,
LCD_SPU_DUMB_CTRL = 0x01b8,
LCD_SPU_IOPAD_CONTROL = 0x01bc,
LCD_SPU_IRQ_ENA = 0x01c0,
LCD_SPU_IRQ_ISR = 0x01c4,
};

/* For LCD_SPU_ADV_REG */
enum {
ADV_VSYNC_L_OFF = 0xfff << 20,
ADV_GRACOLORKEY = 1 << 19,
ADV_VIDCOLORKEY = 1 << 18,
ADV_HWC32BLEND = 1 << 15,
ADV_HWC32ARGB = 1 << 14,
ADV_HWC32ENABLE = 1 << 13,
ADV_VSYNCOFFEN = 1 << 12,
ADV_VSYNC_H_OFF = 0xfff << 0,
};

enum {
CFG_565 = 0,
CFG_1555 = 1,
CFG_888PACK = 2,
CFG_X888 = 3,
CFG_8888 = 4,
CFG_422PACK = 5,
CFG_422 = 6,
CFG_420 = 7,
CFG_PSEUDO4 = 9,
CFG_PSEUDO8 = 10,
CFG_SWAPRB = 1 << 4,
CFG_SWAPUV = 1 << 3,
CFG_SWAPYU = 1 << 2,
CFG_YUV2RGB = 1 << 1,
};

/* For LCD_SPU_DMA_CTRL0 */
enum {
CFG_NOBLENDING = 1 << 31,
CFG_GAMMA_ENA = 1 << 30,
CFG_CBSH_ENA = 1 << 29,
CFG_PALETTE_ENA = 1 << 28,
CFG_ARBFAST_ENA = 1 << 27,
CFG_HWC_1BITMOD = 1 << 26,
CFG_HWC_1BITENA = 1 << 25,
CFG_HWC_ENA = 1 << 24,
CFG_DMAFORMAT = 0xf << 20,
#define CFG_DMA_FMT(x) ((x) << 20)
CFG_GRAFORMAT = 0xf << 16,
#define CFG_GRA_FMT(x) ((x) << 16)
#define CFG_GRA_MOD(x) ((x) << 8)
CFG_GRA_FTOGGLE = 1 << 15,
CFG_GRA_HSMOOTH = 1 << 14,
CFG_GRA_TSTMODE = 1 << 13,
CFG_GRA_ENA = 1 << 8,
#define CFG_DMA_MOD(x) ((x) << 0)
CFG_DMA_FTOGGLE = 1 << 7,
CFG_DMA_HSMOOTH = 1 << 6,
CFG_DMA_TSTMODE = 1 << 5,
CFG_DMA_ENA = 1 << 0,
};

enum {
CKMODE_DISABLE = 0,
CKMODE_Y = 1,
CKMODE_U = 2,
CKMODE_RGB = 3,
CKMODE_V = 4,
CKMODE_R = 5,
CKMODE_G = 6,
CKMODE_B = 7,
};

/* For LCD_SPU_DMA_CTRL1 */
enum {
CFG_FRAME_TRIG = 1 << 31,
CFG_VSYNC_INV = 1 << 27,
CFG_CKMODE_MASK = 0x7 << 24,
#define CFG_CKMODE(x) ((x) << 24)
CFG_CARRY = 1 << 23,
CFG_GATED_CLK = 1 << 21,
CFG_PWRDN_ENA = 1 << 20,
CFG_DSCALE_MASK = 0x3 << 18,
CFG_DSCALE_NONE = 0x0 << 18,
CFG_DSCALE_HALF = 0x1 << 18,
CFG_DSCALE_QUAR = 0x2 << 18,
CFG_ALPHAM_MASK = 0x3 << 16,
CFG_ALPHAM_VIDEO = 0x0 << 16,
CFG_ALPHAM_GRA = 0x1 << 16,
CFG_ALPHAM_CFG = 0x2 << 16,
CFG_ALPHA_MASK = 0xff << 8,
CFG_PIXCMD_MASK = 0xff,
};

/* For LCD_SPU_SRAM_CTRL */
enum {
SRAM_READ = 0 << 14,
SRAM_WRITE = 2 << 14,
SRAM_INIT = 3 << 14,
SRAM_HWC32_RAM1 = 0xc << 8,
SRAM_HWC32_RAM2 = 0xd << 8,
SRAM_HWC32_RAMR = SRAM_HWC32_RAM1,
SRAM_HWC32_RAMG = SRAM_HWC32_RAM2,
SRAM_HWC32_RAMB = 0xe << 8,
SRAM_HWC32_TRAN = 0xf << 8,
SRAM_HWC = 0xf << 8,
};

/* For LCD_SPU_SRAM_PARA1 */
enum {
CFG_CSB_256x32 = 1 << 15, /* cursor */
CFG_CSB_256x24 = 1 << 14, /* palette */
CFG_CSB_256x8 = 1 << 13, /* gamma */
CFG_PDWN1920x32 = 1 << 8, /* Armada 510: power down vscale ram */
CFG_PDWN256x32 = 1 << 7, /* power down cursor */
CFG_PDWN256x24 = 1 << 6, /* power down palette */
CFG_PDWN256x8 = 1 << 5, /* power down gamma */
CFG_PDWNHWC = 1 << 4, /* Armada 510: power down all hwc ram */
CFG_PDWN32x32 = 1 << 3, /* power down slave->smart ram */
CFG_PDWN16x66 = 1 << 2, /* power down UV fifo */
CFG_PDWN32x66 = 1 << 1, /* power down Y fifo */
CFG_PDWN64x66 = 1 << 0, /* power down graphic fifo */
};

/* For LCD_CFG_SCLK_DIV */
enum {
/* Armada 510 */
SCLK_510_AXI = 0x0 << 30,
SCLK_510_EXTCLK0 = 0x1 << 30,
SCLK_510_PLL = 0x2 << 30,
SCLK_510_EXTCLK1 = 0x3 << 30,
SCLK_510_DIV_CHANGE = 1 << 29,
SCLK_510_FRAC_DIV_MASK = 0xfff << 16,
SCLK_510_INT_DIV_MASK = 0xffff << 0,

/* Armada 16x */
SCLK_16X_AHB = 0x0 << 28,
SCLK_16X_PCLK = 0x1 << 28,
SCLK_16X_AXI = 0x4 << 28,
SCLK_16X_PLL = 0x8 << 28,
SCLK_16X_FRAC_DIV_MASK = 0xfff << 16,
SCLK_16X_INT_DIV_MASK = 0xffff << 0,
};

/* For LCD_SPU_DUMB_CTRL */
enum {
DUMB16_RGB565_0 = 0x0 << 28,
DUMB16_RGB565_1 = 0x1 << 28,
DUMB18_RGB666_0 = 0x2 << 28,
DUMB18_RGB666_1 = 0x3 << 28,
DUMB12_RGB444_0 = 0x4 << 28,
DUMB12_RGB444_1 = 0x5 << 28,
DUMB24_RGB888_0 = 0x6 << 28,
DUMB_BLANK = 0x7 << 28,
DUMB_MASK = 0xf << 28,
CFG_BIAS_OUT = 1 << 8,
CFG_REV_RGB = 1 << 7,
CFG_INV_CBLANK = 1 << 6,
CFG_INV_CSYNC = 1 << 5, /* Normally active high */
CFG_INV_HENA = 1 << 4,
CFG_INV_VSYNC = 1 << 3, /* Normally active high */
CFG_INV_HSYNC = 1 << 2, /* Normally active high */
CFG_INV_PCLK = 1 << 1,
CFG_DUMB_ENA = 1 << 0,
};

/* For LCD_SPU_IOPAD_CONTROL */
enum {
CFG_VSCALE_LN_EN = 3 << 18,
CFG_GRA_VM_ENA = 1 << 15,
CFG_DMA_VM_ENA = 1 << 13,
CFG_CMD_VM_ENA = 1 << 11,
CFG_CSC_MASK = 3 << 8,
CFG_CSC_YUV_CCIR709 = 1 << 9,
CFG_CSC_YUV_CCIR601 = 0 << 9,
CFG_CSC_RGB_STUDIO = 1 << 8,
CFG_CSC_RGB_COMPUTER = 0 << 8,
CFG_IOPAD_MASK = 0xf << 0,
CFG_IOPAD_DUMB24 = 0x0 << 0,
CFG_IOPAD_DUMB18SPI = 0x1 << 0,
CFG_IOPAD_DUMB18GPIO = 0x2 << 0,
CFG_IOPAD_DUMB16SPI = 0x3 << 0,
CFG_IOPAD_DUMB16GPIO = 0x4 << 0,
CFG_IOPAD_DUMB12GPIO = 0x5 << 0,
CFG_IOPAD_SMART18 = 0x6 << 0,
CFG_IOPAD_SMART16 = 0x7 << 0,
CFG_IOPAD_SMART8 = 0x8 << 0,
};

#define IOPAD_DUMB24 0x0

/* For LCD_SPU_IRQ_ENA */
enum {
DMA_FRAME_IRQ0_ENA = 1 << 31,
DMA_FRAME_IRQ1_ENA = 1 << 30,
DMA_FRAME_IRQ_ENA = DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA,
DMA_FF_UNDERFLOW_ENA = 1 << 29,
GRA_FRAME_IRQ0_ENA = 1 << 27,
GRA_FRAME_IRQ1_ENA = 1 << 26,
GRA_FRAME_IRQ_ENA = GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA,
GRA_FF_UNDERFLOW_ENA = 1 << 25,
VSYNC_IRQ_ENA = 1 << 23,
DUMB_FRAMEDONE_ENA = 1 << 22,
TWC_FRAMEDONE_ENA = 1 << 21,
HWC_FRAMEDONE_ENA = 1 << 20,
SLV_IRQ_ENA = 1 << 19,
SPI_IRQ_ENA = 1 << 18,
PWRDN_IRQ_ENA = 1 << 17,
ERR_IRQ_ENA = 1 << 16,
CLEAN_SPU_IRQ_ISR = 0xffff,
};

/* For LCD_SPU_IRQ_ISR */
enum {
DMA_FRAME_IRQ0 = 1 << 31,
DMA_FRAME_IRQ1 = 1 << 30,
DMA_FRAME_IRQ = DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1,
DMA_FF_UNDERFLOW = 1 << 29,
GRA_FRAME_IRQ0 = 1 << 27,
GRA_FRAME_IRQ1 = 1 << 26,
GRA_FRAME_IRQ = GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1,
GRA_FF_UNDERFLOW = 1 << 25,
VSYNC_IRQ = 1 << 23,
DUMB_FRAMEDONE = 1 << 22,
TWC_FRAMEDONE = 1 << 21,
HWC_FRAMEDONE = 1 << 20,
SLV_IRQ = 1 << 19,
SPI_IRQ = 1 << 18,
PWRDN_IRQ = 1 << 17,
ERR_IRQ = 1 << 16,
DMA_FRAME_IRQ0_LEVEL = 1 << 15,
DMA_FRAME_IRQ1_LEVEL = 1 << 14,
DMA_FRAME_CNT_ISR = 3 << 12,
GRA_FRAME_IRQ0_LEVEL = 1 << 11,
GRA_FRAME_IRQ1_LEVEL = 1 << 10,
GRA_FRAME_CNT_ISR = 3 << 8,
VSYNC_IRQ_LEVEL = 1 << 7,
DUMB_FRAMEDONE_LEVEL = 1 << 6,
TWC_FRAMEDONE_LEVEL = 1 << 5,
HWC_FRAMEDONE_LEVEL = 1 << 4,
SLV_FF_EMPTY = 1 << 3,
DMA_FF_ALLEMPTY = 1 << 2,
GRA_FF_ALLEMPTY = 1 << 1,
PWRDN_IRQ_LEVEL = 1 << 0,
};

#endif
18  drivers/gpu/drm/armada/armada_ioctlP.h  Normal file
@ -0,0 +1,18 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_IOCTLP_H
#define ARMADA_IOCTLP_H

#define ARMADA_IOCTL_PROTO(name)\
extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *)

ARMADA_IOCTL_PROTO(gem_create);
ARMADA_IOCTL_PROTO(gem_mmap);
ARMADA_IOCTL_PROTO(gem_pwrite);

#endif
158  drivers/gpu/drm/armada/armada_output.c  Normal file
@ -0,0 +1,158 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
#include "armada_output.h"
#include "armada_drm.h"

struct armada_connector {
struct drm_connector conn;
const struct armada_output_type *type;
};

#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)

struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
{
struct drm_encoder *enc = conn->encoder;

return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
}

static enum drm_connector_status armada_drm_connector_detect(
struct drm_connector *conn, bool force)
{
struct armada_connector *dconn = drm_to_armada_conn(conn);
enum drm_connector_status status = connector_status_disconnected;

if (dconn->type->detect) {
status = dconn->type->detect(conn, force);
} else {
struct drm_encoder *enc = armada_drm_connector_encoder(conn);

if (enc)
status = encoder_helper_funcs(enc)->detect(enc, conn);
}

return status;
}

static void armada_drm_connector_destroy(struct drm_connector *conn)
{
struct armada_connector *dconn = drm_to_armada_conn(conn);

drm_sysfs_connector_remove(conn);
drm_connector_cleanup(conn);
kfree(dconn);
}

static int armada_drm_connector_set_property(struct drm_connector *conn,
struct drm_property *property, uint64_t value)
{
struct armada_connector *dconn = drm_to_armada_conn(conn);

if (!dconn->type->set_property)
return -EINVAL;

return dconn->type->set_property(conn, property, value);
}

static const struct drm_connector_funcs armada_drm_conn_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = armada_drm_connector_detect,
.destroy = armada_drm_connector_destroy,
.set_property = armada_drm_connector_set_property,
};

void armada_drm_encoder_prepare(struct drm_encoder *encoder)
{
encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void armada_drm_encoder_commit(struct drm_encoder *encoder)
{
encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON);
}

bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode, struct drm_display_mode *adjusted)
{
return true;
}

/* Shouldn't this be a generic helper function? */
int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
int valid = MODE_BAD;

if (encoder) {
struct drm_encoder_slave *slave = to_encoder_slave(encoder);

valid = slave->slave_funcs->mode_valid(encoder, mode);
}
return valid;
}

int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
struct drm_property *property, uint64_t value)
{
struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
int rc = -EINVAL;

if (encoder) {
struct drm_encoder_slave *slave = to_encoder_slave(encoder);

rc = slave->slave_funcs->set_property(encoder, conn, property,
value);
}
return rc;
}

int armada_output_create(struct drm_device *dev,
const struct armada_output_type *type, const void *data)
{
struct armada_connector *dconn;
int ret;

dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
if (!dconn)
return -ENOMEM;

dconn->type = type;

ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
type->connector_type);
if (ret) {
DRM_ERROR("unable to init connector\n");
goto err_destroy_dconn;
}

ret = type->create(&dconn->conn, data);
if (ret)
goto err_conn;

ret = drm_sysfs_connector_add(&dconn->conn);
if (ret)
goto err_sysfs;

return 0;

err_sysfs:
if (dconn->conn.encoder)
dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
err_conn:
drm_connector_cleanup(&dconn->conn);
err_destroy_dconn:
kfree(dconn);
return ret;
}
39  drivers/gpu/drm/armada/armada_output.h  Normal file
@ -0,0 +1,39 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_CONNETOR_H
#define ARMADA_CONNETOR_H

#define encoder_helper_funcs(encoder) \
((struct drm_encoder_helper_funcs *)encoder->helper_private)

struct armada_output_type {
int connector_type;
enum drm_connector_status (*detect)(struct drm_connector *, bool);
int (*create)(struct drm_connector *, const void *);
int (*set_property)(struct drm_connector *, struct drm_property *,
uint64_t);
};

struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);

void armada_drm_encoder_prepare(struct drm_encoder *encoder);
void armada_drm_encoder_commit(struct drm_encoder *encoder);

bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode, struct drm_display_mode *adj);

int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
struct drm_display_mode *mode);

int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
struct drm_property *property, uint64_t value);

int armada_output_create(struct drm_device *dev,
const struct armada_output_type *type, const void *data);

#endif
477  drivers/gpu/drm/armada/armada_overlay.c  Normal file
@ -0,0 +1,477 @@
/*
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

struct armada_plane_properties {
uint32_t colorkey_yr;
uint32_t colorkey_ug;
uint32_t colorkey_vb;
#define K2R(val) (((val) >> 0) & 0xff)
#define K2G(val) (((val) >> 8) & 0xff)
#define K2B(val) (((val) >> 16) & 0xff)
int16_t brightness;
uint16_t contrast;
uint16_t saturation;
uint32_t colorkey_mode;
};

struct armada_plane {
struct drm_plane base;
spinlock_t lock;
struct drm_framebuffer *old_fb;
uint32_t src_hw;
uint32_t dst_hw;
uint32_t dst_yx;
uint32_t ctrl0;
struct {
struct armada_vbl_event update;
struct armada_regs regs[13];
wait_queue_head_t wait;
} vbl;
struct armada_plane_properties prop;
};
#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)


static void
armada_ovl_update_attr(struct armada_plane_properties *prop,
struct armada_crtc *dcrtc)
{
writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);

writel_relaxed(prop->brightness << 16 | prop->contrast,
dcrtc->base + LCD_SPU_CONTRAST);
/* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
writel_relaxed(prop->saturation << 16,
dcrtc->base + LCD_SPU_SATURATION);
writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);

spin_lock_irq(&dcrtc->irq_lock);
armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
dcrtc->base + LCD_SPU_DMA_CTRL1);

armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
spin_unlock_irq(&dcrtc->irq_lock);
}

/* === Plane support === */
static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
{
struct armada_plane *dplane = data;
struct drm_framebuffer *fb;

armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);

spin_lock(&dplane->lock);
fb = dplane->old_fb;
dplane->old_fb = NULL;
spin_unlock(&dplane->lock);

if (fb)
armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
}

static unsigned armada_limit(int start, unsigned size, unsigned max)
{
int end = start + size;
if (end < 0)
return 0;
if (start < 0)
start = 0;
return (unsigned)end > max ? max - start : end - start;
}

static int
armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
{
struct armada_plane *dplane = drm_to_armada_plane(plane);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
uint32_t val, ctrl0;
unsigned idx = 0;
int ret;

crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;

/* Does the position/size result in nothing to display? */
if (crtc_w == 0 || crtc_h == 0) {
ctrl0 &= ~CFG_DMA_ENA;
}

/*
* FIXME: if the starting point is off screen, we need to
* adjust src_x, src_y, src_w, src_h appropriately, and
* according to the scale.
*/

if (!dcrtc->plane) {
dcrtc->plane = plane;
armada_ovl_update_attr(&dplane->prop, dcrtc);
}

/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
val = (src_h & 0xffff0000) | src_w >> 16;
dplane->src_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
val = crtc_h << 16 | crtc_w;
dplane->dst_hw = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
val = crtc_y << 16 | crtc_x;
dplane->dst_yx = val;
writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
return 0;
} else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
dcrtc->base + LCD_SPU_SRAM_PARA1);
}

ret = wait_event_timeout(dplane->vbl.wait,
list_empty(&dplane->vbl.update.node),
HZ/25);
if (ret < 0)
return ret;

if (plane->fb != fb) {
struct armada_gem_object *obj = drm_fb_obj(fb);
uint32_t sy, su, sv;

/*
* Take a reference on the new framebuffer - we want to
* hold on to it while the hardware is displaying it.
*/
drm_framebuffer_reference(fb);

if (plane->fb) {
struct drm_framebuffer *older_fb;

spin_lock_irq(&dplane->lock);
older_fb = dplane->old_fb;
dplane->old_fb = plane->fb;
spin_unlock_irq(&dplane->lock);
if (older_fb)
armada_drm_queue_unref_work(dcrtc->crtc.dev,
older_fb);
}

src_y >>= 16;
src_x >>= 16;
sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
src_x * fb->bits_per_pixel / 8;
su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
src_x;
sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
src_x;

armada_reg_queue_set(dplane->vbl.regs, idx, sy,
LCD_SPU_DMA_START_ADDR_Y0);
armada_reg_queue_set(dplane->vbl.regs, idx, su,
LCD_SPU_DMA_START_ADDR_U0);
armada_reg_queue_set(dplane->vbl.regs, idx, sv,
LCD_SPU_DMA_START_ADDR_V0);
armada_reg_queue_set(dplane->vbl.regs, idx, sy,
LCD_SPU_DMA_START_ADDR_Y1);
armada_reg_queue_set(dplane->vbl.regs, idx, su,
LCD_SPU_DMA_START_ADDR_U1);
armada_reg_queue_set(dplane->vbl.regs, idx, sv,
LCD_SPU_DMA_START_ADDR_V1);

val = fb->pitches[0] << 16 | fb->pitches[0];
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_PITCH_YC);
val = fb->pitches[1] << 16 | fb->pitches[2];
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_PITCH_UV);
}

val = (src_h & 0xffff0000) | src_w >> 16;
if (dplane->src_hw != val) {
dplane->src_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
val = crtc_h << 16 | crtc_w;
if (dplane->dst_hw != val) {
dplane->dst_hw = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
val = crtc_y << 16 | crtc_x;
if (dplane->dst_yx != val) {
dplane->dst_yx = val;
armada_reg_queue_set(dplane->vbl.regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
if (dplane->ctrl0 != ctrl0) {
dplane->ctrl0 = ctrl0;
armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
CFG_YUV2RGB) | CFG_DMA_ENA,
LCD_SPU_DMA_CTRL0);
}
if (idx) {
armada_reg_queue_end(dplane->vbl.regs, idx);
armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
}
return 0;
}

static int armada_plane_disable(struct drm_plane *plane)
{
struct armada_plane *dplane = drm_to_armada_plane(plane);
struct drm_framebuffer *fb;
struct armada_crtc *dcrtc;

if (!dplane->base.crtc)
return 0;

dcrtc = drm_to_armada_crtc(dplane->base.crtc);
dcrtc->plane = NULL;

spin_lock_irq(&dcrtc->irq_lock);
armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
dplane->ctrl0 = 0;
spin_unlock_irq(&dcrtc->irq_lock);

/* Power down the Y/U/V FIFOs */
armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
dcrtc->base + LCD_SPU_SRAM_PARA1);

if (plane->fb)
drm_framebuffer_unreference(plane->fb);

spin_lock_irq(&dplane->lock);
fb = dplane->old_fb;
dplane->old_fb = NULL;
spin_unlock_irq(&dplane->lock);
if (fb)
drm_framebuffer_unreference(fb);

return 0;
}

static void armada_plane_destroy(struct drm_plane *plane)
{
kfree(plane);
}

static int armada_plane_set_property(struct drm_plane *plane,
struct drm_property *property, uint64_t val)
{
struct armada_private *priv = plane->dev->dev_private;
struct armada_plane *dplane = drm_to_armada_plane(plane);
bool update_attr = false;

if (property == priv->colorkey_prop) {
#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
dplane->prop.colorkey_yr = CCC(K2R(val));
dplane->prop.colorkey_ug = CCC(K2G(val));
dplane->prop.colorkey_vb = CCC(K2B(val));
#undef CCC
update_attr = true;
} else if (property == priv->colorkey_min_prop) {
dplane->prop.colorkey_yr &= ~0x00ff0000;
dplane->prop.colorkey_yr |= K2R(val) << 16;
dplane->prop.colorkey_ug &= ~0x00ff0000;
dplane->prop.colorkey_ug |= K2G(val) << 16;
dplane->prop.colorkey_vb &= ~0x00ff0000;
dplane->prop.colorkey_vb |= K2B(val) << 16;
update_attr = true;
} else if (property == priv->colorkey_max_prop) {
dplane->prop.colorkey_yr &= ~0xff000000;
dplane->prop.colorkey_yr |= K2R(val) << 24;
dplane->prop.colorkey_ug &= ~0xff000000;
dplane->prop.colorkey_ug |= K2G(val) << 24;
dplane->prop.colorkey_vb &= ~0xff000000;
dplane->prop.colorkey_vb |= K2B(val) << 24;
update_attr = true;
} else if (property == priv->colorkey_val_prop) {
dplane->prop.colorkey_yr &= ~0x0000ff00;
dplane->prop.colorkey_yr |= K2R(val) << 8;
dplane->prop.colorkey_ug &= ~0x0000ff00;
dplane->prop.colorkey_ug |= K2G(val) << 8;
dplane->prop.colorkey_vb &= ~0x0000ff00;
dplane->prop.colorkey_vb |= K2B(val) << 8;
update_attr = true;
} else if (property == priv->colorkey_alpha_prop) {
dplane->prop.colorkey_yr &= ~0x000000ff;
dplane->prop.colorkey_yr |= K2R(val);
dplane->prop.colorkey_ug &= ~0x000000ff;
dplane->prop.colorkey_ug |= K2G(val);
dplane->prop.colorkey_vb &= ~0x000000ff;
dplane->prop.colorkey_vb |= K2B(val);
update_attr = true;
} else if (property == priv->colorkey_mode_prop) {
dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
dplane->prop.colorkey_mode |= CFG_CKMODE(val);
update_attr = true;
} else if (property == priv->brightness_prop) {
dplane->prop.brightness = val - 256;
update_attr = true;
} else if (property == priv->contrast_prop) {
dplane->prop.contrast = val;
update_attr = true;
} else if (property == priv->saturation_prop) {
dplane->prop.saturation = val;
update_attr = true;
}

if (update_attr && dplane->base.crtc)
armada_ovl_update_attr(&dplane->prop,
drm_to_armada_crtc(dplane->base.crtc));

return 0;
}

static const struct drm_plane_funcs armada_plane_funcs = {
.update_plane = armada_plane_update,
.disable_plane = armada_plane_disable,
.destroy = armada_plane_destroy,
.set_property = armada_plane_set_property,
};

static const uint32_t armada_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
DRM_FORMAT_YUV422,
DRM_FORMAT_YVU422,
DRM_FORMAT_VYUY,
DRM_FORMAT_YVYU,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
};

static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
{ CKMODE_DISABLE, "disabled" },
{ CKMODE_Y, "Y component" },
{ CKMODE_U, "U component" },
{ CKMODE_V, "V component" },
{ CKMODE_RGB, "RGB" },
{ CKMODE_R, "R component" },
{ CKMODE_G, "G component" },
{ CKMODE_B, "B component" },
};

static int armada_overlay_create_properties(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;

if (priv->colorkey_prop)
return 0;

priv->colorkey_prop = drm_property_create_range(dev, 0,
"colorkey", 0, 0xffffff);
priv->colorkey_min_prop = drm_property_create_range(dev, 0,
"colorkey_min", 0, 0xffffff);
priv->colorkey_max_prop = drm_property_create_range(dev, 0,
"colorkey_max", 0, 0xffffff);
priv->colorkey_val_prop = drm_property_create_range(dev, 0,
"colorkey_val", 0, 0xffffff);
priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
"colorkey_alpha", 0, 0xffffff);
priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
"colorkey_mode",
armada_drm_colorkey_enum_list,
ARRAY_SIZE(armada_drm_colorkey_enum_list));
priv->brightness_prop = drm_property_create_range(dev, 0,
"brightness", 0, 256 + 255);
priv->contrast_prop = drm_property_create_range(dev, 0,
"contrast", 0, 0x7fff);
priv->saturation_prop = drm_property_create_range(dev, 0,
"saturation", 0, 0x7fff);

if (!priv->colorkey_prop)
return -ENOMEM;

return 0;
}

int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
{
struct armada_private *priv = dev->dev_private;
struct drm_mode_object *mobj;
struct armada_plane *dplane;
int ret;

ret = armada_overlay_create_properties(dev);
if (ret)
return ret;

dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
if (!dplane)
return -ENOMEM;

spin_lock_init(&dplane->lock);
init_waitqueue_head(&dplane->vbl.wait);
armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
dplane);

drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
armada_formats, ARRAY_SIZE(armada_formats), false);

dplane->prop.colorkey_yr = 0xfefefe00;
dplane->prop.colorkey_ug = 0x01010100;
dplane->prop.colorkey_vb = 0x01010100;
dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
dplane->prop.brightness = 0;
dplane->prop.contrast = 0x4000;
dplane->prop.saturation = 0x4000;

mobj = &dplane->base.base;
drm_object_attach_property(mobj, priv->colorkey_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_min_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_max_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_val_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
0x000000);
drm_object_attach_property(mobj, priv->colorkey_mode_prop,
CKMODE_RGB);
drm_object_attach_property(mobj, priv->brightness_prop, 256);
drm_object_attach_property(mobj, priv->contrast_prop,
dplane->prop.contrast);
drm_object_attach_property(mobj, priv->saturation_prop,
dplane->prop.saturation);

return 0;
}
139  drivers/gpu/drm/armada/armada_slave.c  Normal file
@ -0,0 +1,139 @@
/*
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
#include "armada_drm.h"
#include "armada_output.h"
#include "armada_slave.h"

static int armada_drm_slave_get_modes(struct drm_connector *conn)
{
struct drm_encoder *enc = armada_drm_connector_encoder(conn);
int count = 0;

if (enc) {
struct drm_encoder_slave *slave = to_encoder_slave(enc);

count = slave->slave_funcs->get_modes(enc, conn);
}

return count;
}

static void armada_drm_slave_destroy(struct drm_encoder *enc)
{
struct drm_encoder_slave *slave = to_encoder_slave(enc);
struct i2c_client *client = drm_i2c_encoder_get_client(enc);

if (slave->slave_funcs)
slave->slave_funcs->destroy(enc);
if (client)
i2c_put_adapter(client->adapter);

drm_encoder_cleanup(&slave->base);
kfree(slave);
}

static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
.destroy = armada_drm_slave_destroy,
};

static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
.get_modes = armada_drm_slave_get_modes,
.mode_valid = armada_drm_slave_encoder_mode_valid,
.best_encoder = armada_drm_connector_encoder,
};

static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
.dpms = drm_i2c_encoder_dpms,
.save = drm_i2c_encoder_save,
.restore = drm_i2c_encoder_restore,
.mode_fixup = drm_i2c_encoder_mode_fixup,
.prepare = drm_i2c_encoder_prepare,
.commit = drm_i2c_encoder_commit,
.mode_set = drm_i2c_encoder_mode_set,
.detect = drm_i2c_encoder_detect,
};

static int
armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
{
const struct armada_drm_slave_config *config = data;
struct drm_encoder_slave *slave;
struct i2c_adapter *adap;
int ret;

conn->interlace_allowed = config->interlace_allowed;
conn->doublescan_allowed = config->doublescan_allowed;
conn->polled = config->polled;

drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);

slave = kzalloc(sizeof(*slave), GFP_KERNEL);
if (!slave)
return -ENOMEM;

slave->base.possible_crtcs = config->crtcs;

adap = i2c_get_adapter(config->i2c_adapter_id);
if (!adap) {
kfree(slave);
return -EPROBE_DEFER;
}

ret = drm_encoder_init(conn->dev, &slave->base,
&armada_drm_slave_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
if (ret) {
DRM_ERROR("unable to init encoder\n");
i2c_put_adapter(adap);
kfree(slave);
return ret;
}

ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
i2c_put_adapter(adap);
if (ret) {
DRM_ERROR("unable to init encoder slave\n");
armada_drm_slave_destroy(&slave->base);
return ret;
}

drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);

ret = slave->slave_funcs->create_resources(&slave->base, conn);
if (ret) {
armada_drm_slave_destroy(&slave->base);
return ret;
}

ret = drm_mode_connector_attach_encoder(conn, &slave->base);
if (ret) {
armada_drm_slave_destroy(&slave->base);
return ret;
}

conn->encoder = &slave->base;

return ret;
}

static const struct armada_output_type armada_drm_conn_slave = {
.connector_type = DRM_MODE_CONNECTOR_HDMIA,
.create = armada_drm_conn_slave_create,
.set_property = armada_drm_slave_encoder_set_property,
};

int armada_drm_connector_slave_create(struct drm_device *dev,
const struct armada_drm_slave_config *config)
{
return armada_output_create(dev, &armada_drm_conn_slave, config);
}
26  drivers/gpu/drm/armada/armada_slave.h  Normal file
@ -0,0 +1,26 @@
/*
* Copyright (C) 2012 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ARMADA_SLAVE_H
#define ARMADA_SLAVE_H

#include <linux/i2c.h>
#include <drm/drmP.h>

struct armada_drm_slave_config {
int i2c_adapter_id;
uint32_t crtcs;
uint8_t polled;
bool interlace_allowed;
bool doublescan_allowed;
struct i2c_board_info info;
};

int armada_drm_connector_slave_create(struct drm_device *dev,
const struct armada_drm_slave_config *);

#endif
@ -494,13 +494,12 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)

int cirrus_vga_get_modes(struct drm_connector *connector)
{
/* Just add a static list of modes */
drm_add_modes_noedid(connector, 640, 480);
drm_add_modes_noedid(connector, 800, 600);
drm_add_modes_noedid(connector, 1024, 768);
drm_add_modes_noedid(connector, 1280, 1024);
int count;

return 4;
/* Just add a static list of modes */
count = drm_add_modes_noedid(connector, 1280, 1024);
drm_set_preferred_mode(connector, 1024, 768);
return count;
}

static int cirrus_vga_mode_valid(struct drm_connector *connector,
@ -1303,7 +1303,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
}

/**
* drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode
* drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
@ -1557,7 +1557,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
@ -1641,7 +1641,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, out_resp->connector_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
@ -1757,7 +1757,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
encoder = obj_to_encoder(obj);
@ -2141,7 +2141,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
@ -2164,7 +2164,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
}
@ -2232,7 +2232,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!obj) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
connector = obj_to_connector(obj);
@ -2280,7 +2280,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
return -EINVAL;
return -ENOENT;
}
crtc = obj_to_crtc(obj);

@ -2489,6 +2489,8 @@ static int format_check(const struct drm_mode_fb_cmd2 *r)
case DRM_FORMAT_YVU444:
return 0;
default:
DRM_DEBUG_KMS("invalid pixel format %s\n",
drm_get_format_name(r->pixel_format));
return -EINVAL;
}
}
@ -2654,7 +2656,7 @@ fail_lookup:
mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&file_priv->fbs_lock);

return -EINVAL;
return -ENOENT;
}

/**
@ -2682,7 +2684,7 @@ int drm_mode_getfb(struct drm_device *dev,

fb = drm_framebuffer_lookup(dev, r->fb_id);
if (!fb)
return -EINVAL;
return -ENOENT;

r->height = fb->height;
r->width = fb->width;
@ -2727,7 +2729,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,

fb = drm_framebuffer_lookup(dev, r->fb_id);
if (!fb)
return -EINVAL;
return -ENOENT;

num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@ -3059,7 +3061,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto done;
}
property = obj_to_property(obj);
@ -3188,7 +3190,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto done;
}
blob = obj_to_blob(obj);
@ -3349,7 +3351,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,

obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
if (!obj->properties) {
@ -3402,8 +3404,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);

arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!arg_obj)
if (!arg_obj) {
ret = -ENOENT;
goto out;
}
if (!arg_obj->properties)
goto out;

@ -3416,8 +3420,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,

prop_obj = drm_mode_object_find(dev, arg->prop_id,
DRM_MODE_OBJECT_PROPERTY);
if (!prop_obj)
if (!prop_obj) {
ret = -ENOENT;
goto out;
}
property = obj_to_property(prop_obj);

if (!drm_property_change_is_valid(property, arg->value))
@ -3502,7 +3508,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
@ -3561,7 +3567,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
@ -3615,7 +3621,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,

obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj)
return -EINVAL;
return -ENOENT;
crtc = obj_to_crtc(obj);

mutex_lock(&crtc->mutex);
@ -3632,8 +3638,10 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
goto out;

fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
if (!fb)
if (!fb) {
ret = -ENOENT;
goto out;
}

ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
if (ret)
@ -3822,7 +3830,8 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
*bpp = 32;
break;
default:
DRM_DEBUG_KMS("unsupported pixel format\n");
DRM_DEBUG_KMS("unsupported pixel format %s\n",
drm_get_format_name(format));
*depth = 0;
*bpp = 0;
break;
@ -405,22 +405,25 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *old_fb)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
|
||||
struct drm_display_mode *adjusted_mode, saved_mode;
|
||||
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
|
||||
struct drm_encoder_helper_funcs *encoder_funcs;
|
||||
int saved_x, saved_y;
|
||||
bool saved_enabled;
|
||||
struct drm_encoder *encoder;
|
||||
bool ret = true;
|
||||
|
||||
saved_enabled = crtc->enabled;
|
||||
crtc->enabled = drm_helper_crtc_in_use(crtc);
|
||||
if (!crtc->enabled)
|
||||
return true;
|
||||
|
||||
adjusted_mode = drm_mode_duplicate(dev, mode);
|
||||
if (!adjusted_mode)
|
||||
if (!adjusted_mode) {
|
||||
crtc->enabled = saved_enabled;
|
||||
return false;
|
||||
}
|
||||
|
||||
saved_hwmode = crtc->hwmode;
|
||||
saved_mode = crtc->mode;
|
||||
saved_x = crtc->x;
|
||||
saved_y = crtc->y;
|
||||
@ -539,7 +542,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
|
||||
done:
|
||||
drm_mode_destroy(dev, adjusted_mode);
|
||||
if (!ret) {
|
||||
crtc->hwmode = saved_hwmode;
|
||||
crtc->enabled = saved_enabled;
|
||||
crtc->mode = saved_mode;
|
||||
crtc->x = saved_x;
|
||||
crtc->y = saved_y;
|
||||
@ -567,6 +570,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
|
||||
continue;
|
||||
|
||||
connector->encoder = NULL;
|
||||
|
||||
/*
|
||||
* drm_helper_disable_unused_functions() ought to be
|
||||
* doing this, but since we've decoupled the encoder
|
||||
* from the connector above, the required connection
|
||||
* between them is henceforth no longer available.
|
||||
*/
|
||||
connector->dpms = DRM_MODE_DPMS_OFF;
|
||||
}
|
||||
}
|
||||
|
||||
@ -593,9 +604,8 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
|
||||
int drm_crtc_helper_set_config(struct drm_mode_set *set)
|
||||
{
|
||||
struct drm_device *dev;
|
||||
struct drm_crtc *save_crtcs, *new_crtc, *crtc;
|
||||
struct drm_crtc *new_crtc;
|
||||
struct drm_encoder *save_encoders, *new_encoder, *encoder;
|
||||
struct drm_framebuffer *old_fb = NULL;
|
||||
bool mode_changed = false; /* if true do a full mode set */
|
||||
bool fb_changed = false; /* if true and !mode_changed just do a flip */
|
||||
struct drm_connector *save_connectors, *connector;
|
||||
@ -631,37 +641,27 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
|
||||
|
||||
dev = set->crtc->dev;
|
||||
|
||||
/* Allocate space for the backup of all (non-pointer) crtc, encoder and
|
||||
* connector data. */
|
||||
save_crtcs = kzalloc(dev->mode_config.num_crtc *
|
||||
sizeof(struct drm_crtc), GFP_KERNEL);
|
||||
if (!save_crtcs)
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* Allocate space for the backup of all (non-pointer) encoder and
|
||||
* connector data.
|
||||
*/
|
||||
save_encoders = kzalloc(dev->mode_config.num_encoder *
|
||||
sizeof(struct drm_encoder), GFP_KERNEL);
|
||||
if (!save_encoders) {
|
||||
kfree(save_crtcs);
|
||||
if (!save_encoders)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
save_connectors = kzalloc(dev->mode_config.num_connector *
|
||||
sizeof(struct drm_connector), GFP_KERNEL);
|
||||
if (!save_connectors) {
|
||||
kfree(save_crtcs);
|
||||
kfree(save_encoders);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Copy data. Note that driver private data is not affected.
|
||||
/*
|
||||
* Copy data. Note that driver private data is not affected.
|
||||
* Should anything bad happen only the expected state is
|
||||
* restored, not the drivers personal bookkeeping.
|
||||
*/
|
||||
count = 0;
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
save_crtcs[count++] = *crtc;
|
||||
}
|
||||
|
||||
count = 0;
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
save_encoders[count++] = *encoder;
|
||||
@ -785,19 +785,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
|
||||
mode_changed = true;
|
||||
|
||||
if (mode_changed) {
|
||||
set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
|
||||
if (set->crtc->enabled) {
|
||||
if (drm_helper_crtc_in_use(set->crtc)) {
|
||||
DRM_DEBUG_KMS("attempting to set mode from"
|
||||
" userspace\n");
|
||||
drm_mode_debug_printmodeline(set->mode);
|
||||
old_fb = set->crtc->fb;
|
||||
set->crtc->fb = set->fb;
|
||||
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
|
||||
set->x, set->y,
|
||||
old_fb)) {
|
||||
save_set.fb)) {
|
||||
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
|
||||
set->crtc->base.id);
|
||||
set->crtc->fb = old_fb;
|
||||
set->crtc->fb = save_set.fb;
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
@ -812,30 +810,23 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
|
||||
} else if (fb_changed) {
|
||||
set->crtc->x = set->x;
|
||||
set->crtc->y = set->y;
|
||||
|
||||
old_fb = set->crtc->fb;
|
||||
if (set->crtc->fb != set->fb)
|
||||
set->crtc->fb = set->fb;
|
||||
set->crtc->fb = set->fb;
|
||||
ret = crtc_funcs->mode_set_base(set->crtc,
|
||||
set->x, set->y, old_fb);
|
||||
set->x, set->y, save_set.fb);
|
||||
if (ret != 0) {
|
||||
set->crtc->fb = old_fb;
|
||||
set->crtc->x = save_set.x;
|
||||
set->crtc->y = save_set.y;
|
||||
set->crtc->fb = save_set.fb;
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
kfree(save_connectors);
|
||||
kfree(save_encoders);
|
||||
kfree(save_crtcs);
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
/* Restore all previous data. */
|
||||
count = 0;
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
*crtc = save_crtcs[count++];
|
||||
}
|
||||
|
||||
count = 0;
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
*encoder = save_encoders[count++];
|
||||
@ -854,7 +845,6 @@ fail:
|
||||
|
||||
kfree(save_connectors);
|
||||
kfree(save_encoders);
|
||||
kfree(save_crtcs);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_crtc_helper_set_config);
|
||||
@ -1135,14 +1125,14 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
|
||||
}
|
||||
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
|
||||
|
||||
void drm_helper_hpd_irq_event(struct drm_device *dev)
|
||||
bool drm_helper_hpd_irq_event(struct drm_device *dev)
|
||||
{
|
||||
struct drm_connector *connector;
|
||||
enum drm_connector_status old_status;
|
||||
bool changed = false;
|
||||
|
||||
if (!dev->mode_config.poll_enabled)
|
||||
return;
|
||||
return false;
|
||||
|
||||
mutex_lock(&dev->mode_config.mutex);
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
@ -1167,5 +1157,7 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
|
||||
|
||||
if (changed)
|
||||
drm_kms_helper_hotplug_event(dev);
|
||||
|
||||
return changed;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
|
||||
|
@ -42,7 +42,7 @@
|
||||
* Initialization, etc.
|
||||
**************************************************/
|
||||
|
||||
static struct drm_info_list drm_debugfs_list[] = {
|
||||
static const struct drm_info_list drm_debugfs_list[] = {
|
||||
{"name", drm_name_info, 0},
|
||||
{"vm", drm_vm_info, 0},
|
||||
{"clients", drm_clients_info, 0},
|
||||
@ -84,7 +84,7 @@ static const struct file_operations drm_debugfs_fops = {
|
||||
* Create a given set of debugfs files represented by an array of
|
||||
* gdm_debugfs_lists in the given root directory.
|
||||
*/
|
||||
int drm_debugfs_create_files(struct drm_info_list *files, int count,
|
||||
int drm_debugfs_create_files(const struct drm_info_list *files, int count,
|
||||
struct dentry *root, struct drm_minor *minor)
|
||||
{
|
||||
struct drm_device *dev = minor->dev;
|
||||
@ -188,7 +188,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
|
||||
*
|
||||
* Remove all debugfs entries created by debugfs_init().
|
||||
*/
|
||||
int drm_debugfs_remove_files(struct drm_info_list *files, int count,
|
||||
int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
|
||||
struct drm_minor *minor)
|
||||
{
|
||||
struct list_head *pos, *q;
|
||||
|
@ -403,7 +403,7 @@ long drm_ioctl(struct file *filp,
|
||||
|
||||
err_i1:
|
||||
if (!ioctl)
|
||||
DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
|
||||
DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
|
||||
task_pid_nr(current),
|
||||
(long)old_encode_dev(file_priv->minor->device),
|
||||
file_priv->authenticated, cmd, nr);
|
||||
|
@ -458,6 +458,15 @@ static const struct drm_display_mode drm_dmt_modes[] = {
|
||||
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
|
||||
};
|
||||
|
||||
/*
|
||||
* These more or less come from the DMT spec. The 720x400 modes are
|
||||
* inferred from historical 80x25 practice. The 640x480@67 and 832x624@75
|
||||
* modes are old-school Mac modes. The EDID spec says the 1152x864@75 mode
|
||||
* should be 1152x870, again for the Mac, but instead we use the x864 DMT
|
||||
* mode.
|
||||
*
|
||||
* The DMT modes have been fact-checked; the rest are mild guesses.
|
||||
*/
|
||||
static const struct drm_display_mode edid_est_modes[] = {
|
||||
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
|
||||
968, 1056, 0, 600, 601, 605, 628, 0,
|
||||
@ -560,7 +569,7 @@ static const struct minimode est3_modes[] = {
|
||||
{ 1600, 1200, 75, 0 },
|
||||
{ 1600, 1200, 85, 0 },
|
||||
{ 1792, 1344, 60, 0 },
|
||||
{ 1792, 1344, 85, 0 },
|
||||
{ 1792, 1344, 75, 0 },
|
||||
{ 1856, 1392, 60, 0 },
|
||||
{ 1856, 1392, 75, 0 },
|
||||
{ 1920, 1200, 60, 1 },
|
||||
@ -1320,7 +1329,7 @@ static u32 edid_get_quirks(struct edid *edid)
|
||||
}
|
||||
|
||||
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
|
||||
#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
|
||||
#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
|
||||
|
||||
/**
|
||||
* edid_fixup_preferred - set preferred modes based on quirk list
|
||||
@ -1335,6 +1344,7 @@ static void edid_fixup_preferred(struct drm_connector *connector,
|
||||
{
|
||||
struct drm_display_mode *t, *cur_mode, *preferred_mode;
|
||||
int target_refresh = 0;
|
||||
int cur_vrefresh, preferred_vrefresh;
|
||||
|
||||
if (list_empty(&connector->probed_modes))
|
||||
return;
|
||||
@ -1357,10 +1367,14 @@ static void edid_fixup_preferred(struct drm_connector *connector,
|
||||
if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
|
||||
preferred_mode = cur_mode;
|
||||
|
||||
cur_vrefresh = cur_mode->vrefresh ?
|
||||
cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
|
||||
preferred_vrefresh = preferred_mode->vrefresh ?
|
||||
preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
|
||||
/* At a given size, try to get closest to target refresh */
|
||||
if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
|
||||
MODE_REFRESH_DIFF(cur_mode, target_refresh) <
|
||||
MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
|
||||
MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
|
||||
MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
|
||||
preferred_mode = cur_mode;
|
||||
}
|
||||
}
|
||||
@ -2080,7 +2094,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
|
||||
u8 *est = ((u8 *)timing) + 5;
|
||||
|
||||
for (i = 0; i < 6; i++) {
|
||||
for (j = 7; j > 0; j--) {
|
||||
for (j = 7; j >= 0; j--) {
|
||||
m = (i * 8) + (7 - j);
|
||||
if (m >= ARRAY_SIZE(est3_modes))
|
||||
break;
|
||||
@ -3473,6 +3487,19 @@ int drm_add_modes_noedid(struct drm_connector *connector,
|
||||
}
|
||||
EXPORT_SYMBOL(drm_add_modes_noedid);
|
||||
|
||||
void drm_set_preferred_mode(struct drm_connector *connector,
|
||||
int hpref, int vpref)
|
||||
{
|
||||
struct drm_display_mode *mode;
|
||||
|
||||
list_for_each_entry(mode, &connector->probed_modes, head) {
|
||||
if (drm_mode_width(mode) == hpref &&
|
||||
drm_mode_height(mode) == vpref)
|
||||
mode->type |= DRM_MODE_TYPE_PREFERRED;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(drm_set_preferred_mode);
|
||||
|
||||
/**
|
||||
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
|
||||
* data from a DRM display mode
|
||||
|
@ -239,7 +239,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
|
||||
|
||||
priv->ioctl_count = 0;
|
||||
/* for compatibility root is always authenticated */
|
||||
priv->authenticated = capable(CAP_SYS_ADMIN);
|
||||
priv->always_authenticated = capable(CAP_SYS_ADMIN);
|
||||
priv->authenticated = priv->always_authenticated;
|
||||
priv->lock_count = 0;
|
||||
|
||||
INIT_LIST_HEAD(&priv->lhead);
|
||||
@ -378,8 +379,10 @@ static void drm_events_release(struct drm_file *file_priv)
|
||||
}
|
||||
|
||||
/* Remove unconsumed events */
|
||||
list_for_each_entry_safe(e, et, &file_priv->event_list, link)
|
||||
list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
|
||||
list_del(&e->link);
|
||||
e->destroy(e);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
}
|
||||
@ -531,7 +534,7 @@ int drm_release(struct inode *inode, struct file *filp)
|
||||
list_for_each_entry(temp, &dev->filelist, lhead) {
|
||||
if ((temp->master == file_priv->master) &&
|
||||
(temp != file_priv))
|
||||
temp->authenticated = 0;
|
||||
temp->authenticated = temp->always_authenticated;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -219,7 +219,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
|
||||
for (i = 0; i < num_crtcs; i++)
|
||||
init_waitqueue_head(&dev->vblank[i].queue);
|
||||
|
||||
DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
|
||||
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
|
||||
|
||||
/* Driver specific high-precision vblank timestamping supported? */
|
||||
if (dev->driver->get_vblank_timestamp)
|
||||
@ -586,24 +586,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
|
||||
* code gets preempted or delayed for some reason.
|
||||
*/
|
||||
for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
|
||||
/* Disable preemption to make it very likely to
|
||||
* succeed in the first iteration even on PREEMPT_RT kernel.
|
||||
/*
|
||||
* Get vertical and horizontal scanout position vpos, hpos,
|
||||
* and bounding timestamps stime, etime, pre/post query.
|
||||
*/
|
||||
preempt_disable();
|
||||
vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
|
||||
&hpos, &stime, &etime);
|
||||
|
||||
/* Get system timestamp before query. */
|
||||
stime = ktime_get();
|
||||
|
||||
/* Get vertical and horizontal scanout pos. vpos, hpos. */
|
||||
vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
|
||||
|
||||
/* Get system timestamp after query. */
|
||||
etime = ktime_get();
|
||||
/*
|
||||
* Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
|
||||
* CLOCK_REALTIME is requested.
|
||||
*/
|
||||
if (!drm_timestamp_monotonic)
|
||||
mono_time_offset = ktime_get_monotonic_offset();
|
||||
|
||||
preempt_enable();
|
||||
|
||||
/* Return as no-op if scanout query unsupported or failed. */
|
||||
if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
|
||||
DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
|
||||
@ -611,6 +607,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* Compute uncertainty in timestamp of scanout position query. */
|
||||
duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
|
||||
|
||||
/* Accept result with < max_error nsecs timing uncertainty. */
|
||||
|
@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
|
||||
/* if equal delete the probed mode */
|
||||
mode->status = pmode->status;
|
||||
/* Merge type bits together */
|
||||
mode->type |= pmode->type;
|
||||
mode->type = pmode->type;
|
||||
list_del(&pmode->head);
|
||||
drm_mode_destroy(connector->dev, pmode);
|
||||
break;
|
||||
|
@ -80,7 +80,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
|
||||
/* Reserve */
|
||||
for (addr = (unsigned long)dmah->vaddr, sz = size;
|
||||
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
SetPageReserved(virt_to_page(addr));
|
||||
SetPageReserved(virt_to_page((void *)addr));
|
||||
}
|
||||
|
||||
return dmah;
|
||||
@ -103,7 +103,7 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
|
||||
/* Unreserve */
|
||||
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
|
||||
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
|
||||
ClearPageReserved(virt_to_page(addr));
|
||||
ClearPageReserved(virt_to_page((void *)addr));
|
||||
}
|
||||
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
|
||||
dmah->busaddr);
|
||||
|
@ -255,16 +255,20 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a secondary minor number.
|
||||
* drm_get_minor - Allocate and register new DRM minor
|
||||
* @dev: DRM device
|
||||
* @minor: Pointer to where new minor is stored
|
||||
* @type: Type of minor
|
||||
*
|
||||
* \param dev device data structure
|
||||
* \param sec-minor structure to hold the assigned minor
|
||||
* \return negative number on failure.
|
||||
* Allocate a new minor of the given type and register it. A pointer to the new
|
||||
* minor is returned in @minor.
|
||||
* Caller must hold the global DRM mutex.
|
||||
*
|
||||
* Search an empty entry and initialize it to the given parameters. This
|
||||
* routines assigns minor numbers to secondary heads of multi-headed cards
|
||||
* RETURNS:
|
||||
* 0 on success, negative error code on failure.
|
||||
*/
|
||||
int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
|
||||
static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor,
|
||||
int type)
|
||||
{
|
||||
struct drm_minor *new_minor;
|
||||
int ret;
|
||||
@ -321,37 +325,48 @@ err_idr:
|
||||
*minor = NULL;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_get_minor);
|
||||
|
||||
/**
|
||||
* Put a secondary minor number.
|
||||
* drm_unplug_minor - Unplug DRM minor
|
||||
* @minor: Minor to unplug
|
||||
*
|
||||
* \param sec_minor - structure to be released
|
||||
* \return always zero
|
||||
* Unplugs the given DRM minor but keeps the object. So after this returns,
|
||||
* minor->dev is still valid so existing open-files can still access it to get
|
||||
* device information from their drm_file ojects.
|
||||
* If the minor is already unplugged or if @minor is NULL, nothing is done.
|
||||
* The global DRM mutex must be held by the caller.
|
||||
*/
|
||||
int drm_put_minor(struct drm_minor **minor_p)
|
||||
static void drm_unplug_minor(struct drm_minor *minor)
|
||||
{
|
||||
struct drm_minor *minor = *minor_p;
|
||||
|
||||
DRM_DEBUG("release secondary minor %d\n", minor->index);
|
||||
if (!minor || !device_is_registered(minor->kdev))
|
||||
return;
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
drm_debugfs_cleanup(minor);
|
||||
#endif
|
||||
|
||||
drm_sysfs_device_remove(minor);
|
||||
|
||||
idr_remove(&drm_minors_idr, minor->index);
|
||||
|
||||
kfree(minor);
|
||||
*minor_p = NULL;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_put_minor);
|
||||
|
||||
static void drm_unplug_minor(struct drm_minor *minor)
|
||||
/**
|
||||
* drm_put_minor - Destroy DRM minor
|
||||
* @minor: Minor to destroy
|
||||
*
|
||||
* This calls drm_unplug_minor() on the given minor and then frees it. Nothing
|
||||
* is done if @minor is NULL. It is fine to call this on already unplugged
|
||||
* minors.
|
||||
* The global DRM mutex must be held by the caller.
|
||||
*/
|
||||
static void drm_put_minor(struct drm_minor *minor)
|
||||
{
|
||||
drm_sysfs_device_remove(minor);
|
||||
if (!minor)
|
||||
return;
|
||||
|
||||
DRM_DEBUG("release secondary minor %d\n", minor->index);
|
||||
|
||||
drm_unplug_minor(minor);
|
||||
kfree(minor);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -472,6 +487,10 @@ EXPORT_SYMBOL(drm_dev_alloc);
|
||||
*/
|
||||
void drm_dev_free(struct drm_device *dev)
|
||||
{
|
||||
drm_put_minor(dev->control);
|
||||
drm_put_minor(dev->render);
|
||||
drm_put_minor(dev->primary);
|
||||
|
||||
if (dev->driver->driver_features & DRIVER_GEM)
|
||||
drm_gem_destroy(dev);
|
||||
|
||||
@ -547,13 +566,11 @@ err_unload:
|
||||
if (dev->driver->unload)
|
||||
dev->driver->unload(dev);
|
||||
err_primary_node:
|
||||
drm_put_minor(&dev->primary);
|
||||
drm_put_minor(dev->primary);
|
||||
err_render_node:
|
||||
if (dev->render)
|
||||
drm_put_minor(&dev->render);
|
||||
drm_put_minor(dev->render);
|
||||
err_control_node:
|
||||
if (dev->control)
|
||||
drm_put_minor(&dev->control);
|
||||
drm_put_minor(dev->control);
|
||||
err_agp:
|
||||
if (dev->driver->bus->agp_destroy)
|
||||
dev->driver->bus->agp_destroy(dev);
|
||||
@ -588,11 +605,9 @@ void drm_dev_unregister(struct drm_device *dev)
|
||||
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
|
||||
drm_rmmap(dev, r_list->map);
|
||||
|
||||
if (dev->control)
|
||||
drm_put_minor(&dev->control);
|
||||
if (dev->render)
|
||||
drm_put_minor(&dev->render);
|
||||
drm_put_minor(&dev->primary);
|
||||
drm_unplug_minor(dev->control);
|
||||
drm_unplug_minor(dev->render);
|
||||
drm_unplug_minor(dev->primary);
|
||||
|
||||
list_del(&dev->driver_item);
|
||||
}
|
||||
|
@ -22,8 +22,8 @@
|
||||
#include <drm/drm_core.h>
|
||||
#include <drm/drmP.h>
|
||||
|
||||
#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
|
||||
#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
|
||||
#define to_drm_minor(d) dev_get_drvdata(d)
|
||||
#define to_drm_connector(d) dev_get_drvdata(d)
|
||||
|
||||
static struct device_type drm_sysfs_device_minor = {
|
||||
.name = "drm_minor"
|
||||
@ -162,20 +162,6 @@ void drm_sysfs_destroy(void)
|
||||
drm_class = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_sysfs_device_release - do nothing
|
||||
* @dev: Linux device
|
||||
*
|
||||
* Normally, this would free the DRM device associated with @dev, along
|
||||
* with cleaning up any other stuff. But we do that in the DRM core, so
|
||||
* this function can just return and hope that the core does its job.
|
||||
*/
|
||||
static void drm_sysfs_device_release(struct device *dev)
|
||||
{
|
||||
memset(dev, 0, sizeof(struct device));
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Connector properties
|
||||
*/
|
||||
@ -380,11 +366,6 @@ static struct bin_attribute edid_attr = {
|
||||
* properties (so far, connection status, dpms, mode list & edid) and
|
||||
* generate a hotplug event so userspace knows there's a new connector
|
||||
* available.
|
||||
*
|
||||
* Note:
|
||||
* This routine should only be called *once* for each registered connector.
|
||||
* A second call for an already registered connector will trigger the BUG_ON
|
||||
* below.
|
||||
*/
|
||||
int drm_sysfs_connector_add(struct drm_connector *connector)
|
||||
{
|
||||
@ -394,29 +375,25 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
/* We shouldn't get called more than once for the same connector */
|
||||
BUG_ON(device_is_registered(&connector->kdev));
|
||||
|
||||
connector->kdev.parent = &dev->primary->kdev;
|
||||
connector->kdev.class = drm_class;
|
||||
connector->kdev.release = drm_sysfs_device_release;
|
||||
if (connector->kdev)
|
||||
return 0;
|
||||
|
||||
connector->kdev = device_create(drm_class, dev->primary->kdev,
|
||||
0, connector, "card%d-%s",
|
||||
dev->primary->index, drm_get_connector_name(connector));
|
||||
DRM_DEBUG("adding \"%s\" to sysfs\n",
|
||||
drm_get_connector_name(connector));
|
||||
|
||||
dev_set_name(&connector->kdev, "card%d-%s",
|
||||
dev->primary->index, drm_get_connector_name(connector));
|
||||
ret = device_register(&connector->kdev);
|
||||
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to register connector device: %d\n", ret);
|
||||
if (IS_ERR(connector->kdev)) {
|
||||
DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
|
||||
ret = PTR_ERR(connector->kdev);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Standard attributes */
|
||||
|
||||
for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
|
||||
ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
|
||||
ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
|
||||
if (ret)
|
||||
goto err_out_files;
|
||||
}
|
||||
@ -433,7 +410,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
|
||||
case DRM_MODE_CONNECTOR_Component:
|
||||
case DRM_MODE_CONNECTOR_TV:
|
||||
for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
|
||||
ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
|
||||
ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
|
||||
if (ret)
|
||||
goto err_out_files;
|
||||
}
|
||||
@ -442,7 +419,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
|
||||
break;
|
||||
}
|
||||
|
||||
ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
|
||||
ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
|
||||
if (ret)
|
||||
goto err_out_files;
|
||||
|
||||
@ -453,10 +430,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
|
||||
|
||||
err_out_files:
|
||||
for (i = 0; i < opt_cnt; i++)
|
||||
device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
|
||||
device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
|
||||
for (i = 0; i < attr_cnt; i++)
|
||||
device_remove_file(&connector->kdev, &connector_attrs[i]);
|
||||
device_unregister(&connector->kdev);
|
||||
device_remove_file(connector->kdev, &connector_attrs[i]);
|
||||
device_unregister(connector->kdev);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
@ -480,16 +457,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!connector->kdev.parent)
|
||||
if (!connector->kdev)
|
||||
return;
|
||||
DRM_DEBUG("removing \"%s\" from sysfs\n",
|
||||
drm_get_connector_name(connector));
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
|
||||
device_remove_file(&connector->kdev, &connector_attrs[i]);
|
||||
sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
|
||||
device_unregister(&connector->kdev);
|
||||
connector->kdev.parent = NULL;
|
||||
device_remove_file(connector->kdev, &connector_attrs[i]);
|
||||
sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
|
||||
device_unregister(connector->kdev);
|
||||
connector->kdev = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sysfs_connector_remove);
|
||||
|
||||
@ -508,7 +485,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
|
||||
|
||||
DRM_DEBUG("generating hotplug event\n");
|
||||
|
||||
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
|
||||
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
|
||||
|
||||
@ -523,15 +500,8 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
|
||||
*/
|
||||
int drm_sysfs_device_add(struct drm_minor *minor)
|
||||
{
|
||||
int err;
|
||||
char *minor_str;
|
||||
|
||||
minor->kdev.parent = minor->dev->dev;
|
||||
|
||||
minor->kdev.class = drm_class;
|
||||
minor->kdev.release = drm_sysfs_device_release;
|
||||
minor->kdev.devt = minor->device;
|
||||
minor->kdev.type = &drm_sysfs_device_minor;
|
||||
if (minor->type == DRM_MINOR_CONTROL)
|
||||
minor_str = "controlD%d";
|
||||
else if (minor->type == DRM_MINOR_RENDER)
|
||||
@ -539,18 +509,14 @@ int drm_sysfs_device_add(struct drm_minor *minor)
|
||||
else
|
||||
minor_str = "card%d";
|
||||
|
||||
dev_set_name(&minor->kdev, minor_str, minor->index);
|
||||
|
||||
err = device_register(&minor->kdev);
|
||||
if (err) {
|
||||
DRM_ERROR("device add failed: %d\n", err);
|
||||
goto err_out;
|
||||
minor->kdev = device_create(drm_class, minor->dev->dev,
|
||||
MKDEV(DRM_MAJOR, minor->index),
|
||||
minor, minor_str, minor->index);
|
||||
if (IS_ERR(minor->kdev)) {
|
||||
DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
|
||||
return PTR_ERR(minor->kdev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -562,9 +528,9 @@ err_out:
|
||||
*/
|
||||
void drm_sysfs_device_remove(struct drm_minor *minor)
|
||||
{
|
||||
if (minor->kdev.parent)
|
||||
device_unregister(&minor->kdev);
|
||||
minor->kdev.parent = NULL;
|
||||
if (minor->kdev)
|
||||
device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
|
||||
minor->kdev = NULL;
|
||||
}
|
||||
|
||||
|
||||
|
@ -301,7 +301,7 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
|
||||
offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
|
||||
page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
|
||||
page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
|
||||
page = virt_to_page((void *)dma->pagelist[page_nr]);
|
||||
|
||||
get_page(page);
|
||||
vmf->page = page;
|
||||
|
@ -634,6 +634,7 @@ const struct psb_ops cdv_chip_ops = {
|
||||
.crtcs = 2,
|
||||
.hdmi_mask = (1 << 0) | (1 << 1),
|
||||
.lvds_mask = (1 << 1),
|
||||
.sdvo_mask = (1 << 0),
|
||||
.cursor_needs_phys = 0,
|
||||
.sgx_offset = MRST_SGX_OFFSET,
|
||||
.chip_setup = cdv_chip_setup,
|
||||
|
@ -666,7 +666,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
|
||||
strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
|
||||
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
|
||||
intel_dp->adapter.algo_data = &intel_dp->algo;
|
||||
intel_dp->adapter.dev.parent = &connector->base.kdev;
|
||||
intel_dp->adapter.dev.parent = connector->base.kdev;
|
||||
|
||||
if (is_edp(encoder))
|
||||
cdv_intel_edp_panel_vdd_on(encoder);
|
||||
|
@ -714,7 +714,7 @@ static void psb_setup_outputs(struct drm_device *dev)
|
||||
clone_mask = (1 << INTEL_OUTPUT_ANALOG);
|
||||
break;
|
||||
case INTEL_OUTPUT_SDVO:
|
||||
crtc_mask = ((1 << 0) | (1 << 1));
|
||||
crtc_mask = dev_priv->ops->sdvo_mask;
|
||||
clone_mask = (1 << INTEL_OUTPUT_SDVO);
|
||||
break;
|
||||
case INTEL_OUTPUT_LVDS:
|
||||
|
@ -51,6 +51,9 @@
|
||||
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
|
||||
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
|
||||
|
||||
#define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg))
|
||||
#define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg))
|
||||
|
||||
/* Intel GPIO access functions */
|
||||
|
||||
#define I2C_RISEFALL_TIME 20
|
||||
@ -71,7 +74,8 @@ struct intel_gpio {
|
||||
void
|
||||
gma_intel_i2c_reset(struct drm_device *dev)
|
||||
{
|
||||
REG_WRITE(GMBUS0, 0);
|
||||
struct drm_psb_private *dev_priv = dev->dev_private;
|
||||
GMBUS_REG_WRITE(GMBUS0, 0);
|
||||
}
|
||||
|
||||
static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
|
||||
@ -98,11 +102,10 @@ static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
|
||||
static u32 get_reserved(struct intel_gpio *gpio)
|
||||
{
|
||||
struct drm_psb_private *dev_priv = gpio->dev_priv;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
u32 reserved = 0;
|
||||
|
||||
/* On most chips, these bits must be preserved in software. */
|
||||
reserved = REG_READ(gpio->reg) &
|
||||
reserved = GMBUS_REG_READ(gpio->reg) &
|
||||
(GPIO_DATA_PULLUP_DISABLE |
|
||||
GPIO_CLOCK_PULLUP_DISABLE);
|
||||
|
||||
@ -113,29 +116,26 @@ static int get_clock(void *data)
|
||||
{
|
||||
struct intel_gpio *gpio = data;
|
||||
struct drm_psb_private *dev_priv = gpio->dev_priv;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
u32 reserved = get_reserved(gpio);
|
||||
REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
|
||||
REG_WRITE(gpio->reg, reserved);
|
||||
return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
|
||||
GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
|
||||
GMBUS_REG_WRITE(gpio->reg, reserved);
|
||||
return (GMBUS_REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
|
||||
}
|
||||
|
||||
static int get_data(void *data)
|
||||
{
|
||||
struct intel_gpio *gpio = data;
|
||||
struct drm_psb_private *dev_priv = gpio->dev_priv;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
u32 reserved = get_reserved(gpio);
|
||||
REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
|
||||
REG_WRITE(gpio->reg, reserved);
|
||||
return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
|
||||
GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
|
||||
GMBUS_REG_WRITE(gpio->reg, reserved);
|
||||
return (GMBUS_REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
|
||||
}
|
||||
|
||||
static void set_clock(void *data, int state_high)
|
||||
{
|
||||
struct intel_gpio *gpio = data;
|
||||
struct drm_psb_private *dev_priv = gpio->dev_priv;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
u32 reserved = get_reserved(gpio);
|
||||
u32 clock_bits;
|
||||
|
||||
@ -145,15 +145,14 @@ static void set_clock(void *data, int state_high)
|
||||
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
|
||||
GPIO_CLOCK_VAL_MASK;
|
||||
|
||||
REG_WRITE(gpio->reg, reserved | clock_bits);
|
||||
REG_READ(gpio->reg); /* Posting */
|
||||
GMBUS_REG_WRITE(gpio->reg, reserved | clock_bits);
|
||||
GMBUS_REG_READ(gpio->reg); /* Posting */
|
||||
}
|
||||
|
||||
static void set_data(void *data, int state_high)
|
||||
{
|
||||
struct intel_gpio *gpio = data;
|
||||
struct drm_psb_private *dev_priv = gpio->dev_priv;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
u32 reserved = get_reserved(gpio);
|
||||
u32 data_bits;
|
||||
|
||||
@ -163,8 +162,8 @@ static void set_data(void *data, int state_high)
|
||||
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
|
||||
GPIO_DATA_VAL_MASK;
|
||||
|
||||
REG_WRITE(gpio->reg, reserved | data_bits);
|
||||
REG_READ(gpio->reg);
|
||||
GMBUS_REG_WRITE(gpio->reg, reserved | data_bits);
|
||||
GMBUS_REG_READ(gpio->reg);
|
||||
}
|
||||
|
||||
static struct i2c_adapter *
|
||||
@ -251,7 +250,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
|
||||
struct intel_gmbus,
|
||||
adapter);
|
||||
struct drm_psb_private *dev_priv = adapter->algo_data;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
int i, reg_offset;
|
||||
|
||||
if (bus->force_bit)
|
||||
@ -260,28 +258,30 @@ gmbus_xfer(struct i2c_adapter *adapter,
|
||||
|
||||
reg_offset = 0;
|
||||
|
||||
REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
|
||||
GMBUS_REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
u16 len = msgs[i].len;
|
||||
u8 *buf = msgs[i].buf;
|
||||
|
||||
if (msgs[i].flags & I2C_M_RD) {
|
||||
REG_WRITE(GMBUS1 + reg_offset,
|
||||
GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
|
||||
(len << GMBUS_BYTE_COUNT_SHIFT) |
|
||||
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
|
||||
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
|
||||
REG_READ(GMBUS2+reg_offset);
|
||||
GMBUS_REG_WRITE(GMBUS1 + reg_offset,
|
||||
GMBUS_CYCLE_WAIT |
|
||||
(i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
|
||||
(len << GMBUS_BYTE_COUNT_SHIFT) |
|
||||
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
|
||||
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
|
||||
GMBUS_REG_READ(GMBUS2+reg_offset);
|
||||
do {
|
||||
u32 val, loop = 0;
|
||||
|
||||
if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
|
||||
if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
|
||||
(GMBUS_SATOER | GMBUS_HW_RDY), 50))
|
||||
goto timeout;
|
||||
if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
|
||||
if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
|
||||
goto clear_err;
|
||||
|
||||
val = REG_READ(GMBUS3 + reg_offset);
|
||||
val = GMBUS_REG_READ(GMBUS3 + reg_offset);
|
||||
do {
|
||||
*buf++ = val & 0xff;
|
||||
val >>= 8;
|
||||
@ -295,18 +295,20 @@ gmbus_xfer(struct i2c_adapter *adapter,
|
||||
val |= *buf++ << (8 * loop);
|
||||
} while (--len && ++loop < 4);
|
||||
|
||||
REG_WRITE(GMBUS3 + reg_offset, val);
|
||||
REG_WRITE(GMBUS1 + reg_offset,
|
||||
GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
|
||||
GMBUS_REG_WRITE(GMBUS1 + reg_offset,
|
||||
(i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
|
||||
(msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
|
||||
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
|
||||
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
|
||||
REG_READ(GMBUS2+reg_offset);
|
||||
GMBUS_REG_READ(GMBUS2+reg_offset);
|
||||
|
||||
while (len) {
|
||||
if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
|
||||
if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
|
||||
(GMBUS_SATOER | GMBUS_HW_RDY), 50))
|
||||
goto timeout;
|
||||
if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
|
||||
if (GMBUS_REG_READ(GMBUS2 + reg_offset) &
|
||||
GMBUS_SATOER)
|
||||
goto clear_err;
|
||||
|
||||
val = loop = 0;
|
||||
@ -314,14 +316,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
|
||||
val |= *buf++ << (8 * loop);
|
||||
} while (--len && ++loop < 4);
|
||||
|
||||
REG_WRITE(GMBUS3 + reg_offset, val);
|
||||
REG_READ(GMBUS2+reg_offset);
|
||||
GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
|
||||
GMBUS_REG_READ(GMBUS2+reg_offset);
|
||||
}
|
||||
}
|
||||
|
||||
if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
|
||||
if (i + 1 < num && wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
|
||||
goto timeout;
|
||||
if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
|
||||
if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
|
||||
goto clear_err;
|
||||
}
|
||||
|
||||
@ -332,20 +334,20 @@ clear_err:
|
||||
* of resetting the GMBUS controller and so clearing the
|
||||
* BUS_ERROR raised by the slave's NAK.
|
||||
*/
|
||||
REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
|
||||
REG_WRITE(GMBUS1 + reg_offset, 0);
|
||||
GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
|
||||
GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);
|
||||
|
||||
done:
|
||||
/* Mark the GMBUS interface as disabled. We will re-enable it at the
|
||||
* start of the next xfer, till then let it sleep.
|
||||
*/
|
||||
REG_WRITE(GMBUS0 + reg_offset, 0);
|
||||
GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
|
||||
return i;
|
||||
|
||||
timeout:
|
||||
DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
|
||||
bus->reg0 & 0xff, bus->adapter.name);
|
||||
REG_WRITE(GMBUS0 + reg_offset, 0);
|
||||
GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
|
||||
|
||||
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
|
||||
bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
|
||||
@ -399,6 +401,11 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
|
||||
if (dev_priv->gmbus == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
if (IS_MRST(dev))
|
||||
dev_priv->gmbus_reg = dev_priv->aux_reg;
|
||||
else
|
||||
dev_priv->gmbus_reg = dev_priv->vdc_reg;
|
||||
|
||||
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
|
||||
struct intel_gmbus *bus = &dev_priv->gmbus[i];
|
||||
|
||||
@ -487,6 +494,7 @@ void gma_intel_teardown_gmbus(struct drm_device *dev)
|
||||
i2c_del_adapter(&bus->adapter);
|
||||
}
|
||||
|
||||
dev_priv->gmbus_reg = NULL; /* iounmap is done in driver_unload */
|
||||
kfree(dev_priv->gmbus);
|
||||
dev_priv->gmbus = NULL;
|
||||
}
|
||||
|
@ -26,24 +26,10 @@
|
||||
#include "gma_display.h"
|
||||
#include "power.h"
|
||||
|
||||
struct psb_intel_range_t {
|
||||
int min, max;
|
||||
};
|
||||
|
||||
struct oaktrail_limit_t {
|
||||
struct psb_intel_range_t dot, m, p1;
|
||||
};
|
||||
|
||||
struct oaktrail_clock_t {
|
||||
/* derived values */
|
||||
int dot;
|
||||
int m;
|
||||
int p1;
|
||||
};
|
||||
|
||||
#define MRST_LIMIT_LVDS_100L 0
|
||||
#define MRST_LIMIT_LVDS_83 1
|
||||
#define MRST_LIMIT_LVDS_100 2
|
||||
#define MRST_LIMIT_LVDS_100L 0
|
||||
#define MRST_LIMIT_LVDS_83 1
|
||||
#define MRST_LIMIT_LVDS_100 2
|
||||
#define MRST_LIMIT_SDVO 3
|
||||
|
||||
#define MRST_DOT_MIN 19750
|
||||
#define MRST_DOT_MAX 120000
|
||||
@ -57,21 +43,40 @@ struct oaktrail_clock_t {
|
||||
#define MRST_P1_MAX_0 7
|
||||
#define MRST_P1_MAX_1 8
|
||||
|
||||
static const struct oaktrail_limit_t oaktrail_limits[] = {
|
||||
static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
|
||||
struct drm_crtc *crtc, int target,
|
||||
int refclk, struct gma_clock_t *best_clock);
|
||||
|
||||
static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
|
||||
struct drm_crtc *crtc, int target,
|
||||
int refclk, struct gma_clock_t *best_clock);
|
||||
|
||||
static const struct gma_limit_t mrst_limits[] = {
|
||||
{ /* MRST_LIMIT_LVDS_100L */
|
||||
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
|
||||
.m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
|
||||
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
|
||||
.find_pll = mrst_lvds_find_best_pll,
|
||||
},
|
||||
{ /* MRST_LIMIT_LVDS_83L */
|
||||
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
|
||||
.m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
|
||||
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
|
||||
.find_pll = mrst_lvds_find_best_pll,
|
||||
},
|
||||
{ /* MRST_LIMIT_LVDS_100 */
|
||||
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
|
||||
.m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
|
||||
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
|
||||
.find_pll = mrst_lvds_find_best_pll,
|
||||
},
|
||||
{ /* MRST_LIMIT_SDVO */
|
||||
.vco = {.min = 1400000, .max = 2800000},
|
||||
.n = {.min = 3, .max = 7},
|
||||
.m = {.min = 80, .max = 137},
|
||||
.p1 = {.min = 1, .max = 2},
|
||||
.p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 10},
|
||||
.find_pll = mrst_sdvo_find_best_pll,
|
||||
},
|
||||
};
|
||||
|
||||
@ -82,9 +87,10 @@ static const u32 oaktrail_m_converts[] = {
|
||||
0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
|
||||
};
|
||||
|
||||
static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
|
||||
static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
|
||||
int refclk)
|
||||
{
|
||||
const struct oaktrail_limit_t *limit = NULL;
|
||||
const struct gma_limit_t *limit = NULL;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_psb_private *dev_priv = dev->dev_private;
|
||||
|
||||
@ -92,45 +98,100 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
|
||||
|| gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
|
||||
switch (dev_priv->core_freq) {
|
||||
case 100:
|
||||
limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
|
||||
limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
|
||||
break;
|
||||
case 166:
|
||||
limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
|
||||
limit = &mrst_limits[MRST_LIMIT_LVDS_83];
|
||||
break;
|
||||
case 200:
|
||||
limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
|
||||
limit = &mrst_limits[MRST_LIMIT_LVDS_100];
|
||||
break;
|
||||
}
|
||||
} else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
|
||||
limit = &mrst_limits[MRST_LIMIT_SDVO];
|
||||
} else {
|
||||
limit = NULL;
|
||||
dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
|
||||
dev_err(dev->dev, "mrst_limit Wrong display type.\n");
|
||||
}
|
||||
|
||||
return limit;
|
||||
}
|
||||
|
||||
/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
|
||||
static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
|
||||
static void mrst_lvds_clock(int refclk, struct gma_clock_t *clock)
|
||||
{
|
||||
clock->dot = (refclk * clock->m) / (14 * clock->p1);
|
||||
}
|
||||
|
||||
static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
|
||||
static void mrst_print_pll(struct gma_clock_t *clock)
|
||||
{
|
||||
pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n",
|
||||
prefix, clock->dot, clock->m, clock->p1);
|
||||
DRM_DEBUG_DRIVER("dotclock=%d, m=%d, m1=%d, m2=%d, n=%d, p1=%d, p2=%d\n",
|
||||
clock->dot, clock->m, clock->m1, clock->m2, clock->n,
|
||||
clock->p1, clock->p2);
|
||||
}
|
||||
|
||||
static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
|
||||
struct drm_crtc *crtc, int target,
|
||||
int refclk, struct gma_clock_t *best_clock)
|
||||
{
|
||||
struct gma_clock_t clock;
|
||||
u32 target_vco, actual_freq;
|
||||
s32 freq_error, min_error = 100000;
|
||||
|
||||
memset(best_clock, 0, sizeof(*best_clock));
|
||||
|
||||
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
|
||||
for (clock.n = limit->n.min; clock.n <= limit->n.max;
|
||||
clock.n++) {
|
||||
for (clock.p1 = limit->p1.min;
|
||||
clock.p1 <= limit->p1.max; clock.p1++) {
|
||||
/* p2 value always stored in p2_slow on SDVO */
|
||||
clock.p = clock.p1 * limit->p2.p2_slow;
|
||||
target_vco = target * clock.p;
|
||||
|
||||
/* VCO will increase at this point so break */
|
||||
if (target_vco > limit->vco.max)
|
||||
break;
|
||||
|
||||
if (target_vco < limit->vco.min)
|
||||
continue;
|
||||
|
||||
actual_freq = (refclk * clock.m) /
|
||||
(clock.n * clock.p);
|
||||
freq_error = 10000 -
|
||||
((target * 10000) / actual_freq);
|
||||
|
||||
if (freq_error < -min_error) {
|
||||
/* freq_error will start to decrease at
|
||||
this point so break */
|
||||
break;
|
||||
}
|
||||
|
||||
if (freq_error < 0)
|
||||
freq_error = -freq_error;
|
||||
|
||||
if (freq_error < min_error) {
|
||||
min_error = freq_error;
|
||||
*best_clock = clock;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (min_error == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
return min_error == 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a set of divisors for the desired target clock with the given refclk,
|
||||
* or FALSE. Divisor values are the actual divisors for
|
||||
*/
|
||||
static bool
|
||||
mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
|
||||
struct oaktrail_clock_t *best_clock)
|
||||
static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
|
||||
struct drm_crtc *crtc, int target,
|
||||
int refclk, struct gma_clock_t *best_clock)
|
||||
{
|
||||
struct oaktrail_clock_t clock;
|
||||
const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
|
||||
struct gma_clock_t clock;
|
||||
int err = target;
|
||||
|
||||
memset(best_clock, 0, sizeof(*best_clock));
|
||||
@ -140,7 +201,7 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
|
||||
clock.p1++) {
|
||||
int this_err;
|
||||
|
||||
oaktrail_clock(refclk, &clock);
|
||||
mrst_lvds_clock(refclk, &clock);
|
||||
|
||||
this_err = abs(clock.dot - target);
|
||||
if (this_err < err) {
|
||||
@ -149,7 +210,6 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
|
||||
}
|
||||
}
|
||||
}
|
||||
dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
|
||||
return err != target;
|
||||
}
|
||||
|
||||
@ -167,8 +227,10 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
|
||||
int pipe = gma_crtc->pipe;
|
||||
const struct psb_offset *map = &dev_priv->regmap[pipe];
|
||||
u32 temp;
|
||||
int i;
|
||||
int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
|
||||
|
||||
if (pipe == 1) {
|
||||
if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
|
||||
oaktrail_crtc_hdmi_dpms(crtc, mode);
|
||||
return;
|
||||
}
|
||||
@ -183,35 +245,45 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
|
||||
case DRM_MODE_DPMS_ON:
|
||||
case DRM_MODE_DPMS_STANDBY:
|
||||
case DRM_MODE_DPMS_SUSPEND:
|
||||
/* Enable the DPLL */
|
||||
temp = REG_READ(map->dpll);
|
||||
if ((temp & DPLL_VCO_ENABLE) == 0) {
|
||||
REG_WRITE(map->dpll, temp);
|
||||
REG_READ(map->dpll);
|
||||
/* Wait for the clocks to stabilize. */
|
||||
udelay(150);
|
||||
REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
|
||||
REG_READ(map->dpll);
|
||||
/* Wait for the clocks to stabilize. */
|
||||
udelay(150);
|
||||
REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
|
||||
REG_READ(map->dpll);
|
||||
/* Wait for the clocks to stabilize. */
|
||||
udelay(150);
|
||||
}
|
||||
/* Enable the pipe */
|
||||
temp = REG_READ(map->conf);
|
||||
if ((temp & PIPEACONF_ENABLE) == 0)
|
||||
REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
|
||||
/* Enable the plane */
|
||||
temp = REG_READ(map->cntr);
|
||||
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
|
||||
REG_WRITE(map->cntr,
|
||||
temp | DISPLAY_PLANE_ENABLE);
|
||||
/* Flush the plane changes */
|
||||
REG_WRITE(map->base, REG_READ(map->base));
|
||||
}
|
||||
for (i = 0; i <= need_aux; i++) {
|
||||
/* Enable the DPLL */
|
||||
temp = REG_READ_WITH_AUX(map->dpll, i);
|
||||
if ((temp & DPLL_VCO_ENABLE) == 0) {
|
||||
REG_WRITE_WITH_AUX(map->dpll, temp, i);
|
||||
REG_READ_WITH_AUX(map->dpll, i);
|
||||
/* Wait for the clocks to stabilize. */
|
||||
udelay(150);
|
||||
REG_WRITE_WITH_AUX(map->dpll,
|
||||
temp | DPLL_VCO_ENABLE, i);
|
||||
REG_READ_WITH_AUX(map->dpll, i);
|
||||
/* Wait for the clocks to stabilize. */
|
||||
udelay(150);
|
||||
REG_WRITE_WITH_AUX(map->dpll,
|
||||
temp | DPLL_VCO_ENABLE, i);
|
||||
REG_READ_WITH_AUX(map->dpll, i);
|
||||
/* Wait for the clocks to stabilize. */
|
||||
udelay(150);
|
||||
}
|
||||
|
||||
/* Enable the pipe */
|
||||
temp = REG_READ_WITH_AUX(map->conf, i);
|
||||
if ((temp & PIPEACONF_ENABLE) == 0) {
|
||||
REG_WRITE_WITH_AUX(map->conf,
|
||||
temp | PIPEACONF_ENABLE, i);
|
||||
}
|
||||
|
||||
/* Enable the plane */
|
||||
temp = REG_READ_WITH_AUX(map->cntr, i);
|
||||
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
|
||||
REG_WRITE_WITH_AUX(map->cntr,
|
||||
temp | DISPLAY_PLANE_ENABLE,
|
||||
i);
|
||||
/* Flush the plane changes */
|
||||
REG_WRITE_WITH_AUX(map->base,
|
||||
REG_READ_WITH_AUX(map->base, i), i);
|
||||
}
|
||||
|
||||
}
|
||||
gma_crtc_load_lut(crtc);
|
||||
|
||||
/* Give the overlay scaler a chance to enable
|
||||
@ -223,48 +295,52 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
|
||||
* if it's on this pipe */
|
||||
/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
|
||||
|
||||
/* Disable the VGA plane that we never use */
|
||||
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
|
||||
/* Disable display plane */
|
||||
temp = REG_READ(map->cntr);
|
||||
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
|
||||
REG_WRITE(map->cntr,
|
||||
temp & ~DISPLAY_PLANE_ENABLE);
|
||||
/* Flush the plane changes */
|
||||
REG_WRITE(map->base, REG_READ(map->base));
|
||||
REG_READ(map->base);
|
||||
}
|
||||
for (i = 0; i <= need_aux; i++) {
|
||||
/* Disable the VGA plane that we never use */
|
||||
REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
|
||||
/* Disable display plane */
|
||||
temp = REG_READ_WITH_AUX(map->cntr, i);
|
||||
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
|
||||
REG_WRITE_WITH_AUX(map->cntr,
|
||||
temp & ~DISPLAY_PLANE_ENABLE, i);
|
||||
/* Flush the plane changes */
|
||||
REG_WRITE_WITH_AUX(map->base,
|
||||
REG_READ(map->base), i);
|
||||
REG_READ_WITH_AUX(map->base, i);
|
||||
}
|
||||
|
||||
/* Next, disable display pipes */
|
||||
temp = REG_READ(map->conf);
|
||||
if ((temp & PIPEACONF_ENABLE) != 0) {
|
||||
REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
|
||||
REG_READ(map->conf);
|
||||
}
|
||||
/* Wait for for the pipe disable to take effect. */
|
||||
gma_wait_for_vblank(dev);
|
||||
/* Next, disable display pipes */
|
||||
temp = REG_READ_WITH_AUX(map->conf, i);
|
||||
if ((temp & PIPEACONF_ENABLE) != 0) {
|
||||
REG_WRITE_WITH_AUX(map->conf,
|
||||
temp & ~PIPEACONF_ENABLE, i);
|
||||
REG_READ_WITH_AUX(map->conf, i);
|
||||
}
|
||||
/* Wait for for the pipe disable to take effect. */
|
||||
gma_wait_for_vblank(dev);
|
||||
|
||||
temp = REG_READ(map->dpll);
|
||||
if ((temp & DPLL_VCO_ENABLE) != 0) {
|
||||
REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
|
||||
REG_READ(map->dpll);
|
||||
}
|
||||
temp = REG_READ_WITH_AUX(map->dpll, i);
|
||||
if ((temp & DPLL_VCO_ENABLE) != 0) {
|
||||
REG_WRITE_WITH_AUX(map->dpll,
|
||||
temp & ~DPLL_VCO_ENABLE, i);
|
||||
REG_READ_WITH_AUX(map->dpll, i);
|
||||
}
|
||||
|
||||
/* Wait for the clocks to turn off. */
|
||||
udelay(150);
|
||||
/* Wait for the clocks to turn off. */
|
||||
udelay(150);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
/*Set FIFO Watermarks*/
|
||||
REG_WRITE(DSPARB, 0x3FFF);
|
||||
REG_WRITE(DSPFW1, 0x3F88080A);
|
||||
REG_WRITE(DSPFW2, 0x0b060808);
|
||||
/* Set FIFO Watermarks (values taken from EMGD) */
|
||||
REG_WRITE(DSPARB, 0x3f80);
|
||||
REG_WRITE(DSPFW1, 0x3f8f0404);
|
||||
REG_WRITE(DSPFW2, 0x04040f04);
|
||||
REG_WRITE(DSPFW3, 0x0);
|
||||
REG_WRITE(DSPFW4, 0x08030404);
|
||||
REG_WRITE(DSPFW4, 0x04040404);
|
||||
REG_WRITE(DSPFW5, 0x04040404);
|
||||
REG_WRITE(DSPFW6, 0x78);
|
||||
REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
|
||||
/* Must write Bit 14 of the Chicken Bit Register */
|
||||
REG_WRITE(DSPCHICKENBIT, REG_READ(DSPCHICKENBIT) | 0xc040);
|
||||
|
||||
gma_power_end(dev);
|
||||
}
|
||||
@ -297,7 +373,8 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
|
||||
int pipe = gma_crtc->pipe;
|
||||
const struct psb_offset *map = &dev_priv->regmap[pipe];
|
||||
int refclk = 0;
|
||||
struct oaktrail_clock_t clock;
|
||||
struct gma_clock_t clock;
|
||||
const struct gma_limit_t *limit;
|
||||
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
|
||||
bool ok, is_sdvo = false;
|
||||
bool is_lvds = false;
|
||||
@ -306,8 +383,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
|
||||
struct gma_encoder *gma_encoder = NULL;
|
||||
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
|
||||
struct drm_connector *connector;
|
||||
int i;
|
||||
int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
|
||||
|
||||
if (pipe == 1)
|
||||
if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
|
||||
return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
|
||||
|
||||
if (!gma_power_begin(dev, true))
|
||||
@ -340,15 +419,17 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
|
||||
}
|
||||
|
||||
/* Disable the VGA plane that we never use */
|
||||
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
|
||||
for (i = 0; i <= need_aux; i++)
|
||||
REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
|
||||
|
||||
/* Disable the panel fitter if it was on our pipe */
|
||||
if (oaktrail_panel_fitter_pipe(dev) == pipe)
|
||||
REG_WRITE(PFIT_CONTROL, 0);
|
||||
|
||||
REG_WRITE(map->src,
|
||||
((mode->crtc_hdisplay - 1) << 16) |
|
||||
(mode->crtc_vdisplay - 1));
|
||||
for (i = 0; i <= need_aux; i++) {
|
||||
REG_WRITE_WITH_AUX(map->src, ((mode->crtc_hdisplay - 1) << 16) |
|
||||
(mode->crtc_vdisplay - 1), i);
|
||||
}
|
||||
|
||||
if (gma_encoder)
|
||||
drm_object_property_get_value(&connector->base,
|
||||
@ -365,35 +446,39 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
|
||||
offsetY = (adjusted_mode->crtc_vdisplay -
|
||||
mode->crtc_vdisplay) / 2;
|
||||
|
||||
REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
|
||||
((adjusted_mode->crtc_htotal - 1) << 16));
|
||||
REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
|
||||
((adjusted_mode->crtc_vtotal - 1) << 16));
|
||||
REG_WRITE(map->hblank,
|
||||
(adjusted_mode->crtc_hblank_start - offsetX - 1) |
|
||||
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
|
||||
REG_WRITE(map->hsync,
|
||||
(adjusted_mode->crtc_hsync_start - offsetX - 1) |
|
||||
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
|
||||
REG_WRITE(map->vblank,
|
||||
(adjusted_mode->crtc_vblank_start - offsetY - 1) |
|
||||
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
|
||||
REG_WRITE(map->vsync,
|
||||
(adjusted_mode->crtc_vsync_start - offsetY - 1) |
|
||||
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
|
||||
for (i = 0; i <= need_aux; i++) {
|
||||
REG_WRITE_WITH_AUX(map->htotal, (mode->crtc_hdisplay - 1) |
|
||||
((adjusted_mode->crtc_htotal - 1) << 16), i);
|
||||
REG_WRITE_WITH_AUX(map->vtotal, (mode->crtc_vdisplay - 1) |
|
||||
((adjusted_mode->crtc_vtotal - 1) << 16), i);
|
||||
REG_WRITE_WITH_AUX(map->hblank,
|
||||
(adjusted_mode->crtc_hblank_start - offsetX - 1) |
|
||||
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16), i);
|
||||
REG_WRITE_WITH_AUX(map->hsync,
|
||||
(adjusted_mode->crtc_hsync_start - offsetX - 1) |
|
||||
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16), i);
|
||||
REG_WRITE_WITH_AUX(map->vblank,
|
||||
(adjusted_mode->crtc_vblank_start - offsetY - 1) |
|
||||
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16), i);
|
||||
REG_WRITE_WITH_AUX(map->vsync,
|
||||
(adjusted_mode->crtc_vsync_start - offsetY - 1) |
|
||||
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16), i);
|
||||
}
|
||||
} else {
|
||||
REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
|
||||
((adjusted_mode->crtc_htotal - 1) << 16));
|
||||
REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
|
||||
((adjusted_mode->crtc_vtotal - 1) << 16));
|
||||
REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
|
||||
((adjusted_mode->crtc_hblank_end - 1) << 16));
|
||||
REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
|
||||
((adjusted_mode->crtc_hsync_end - 1) << 16));
|
||||
REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
|
||||
((adjusted_mode->crtc_vblank_end - 1) << 16));
|
||||
REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
|
||||
((adjusted_mode->crtc_vsync_end - 1) << 16));
|
||||
for (i = 0; i <= need_aux; i++) {
|
||||
REG_WRITE_WITH_AUX(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
|
||||
((adjusted_mode->crtc_htotal - 1) << 16), i);
|
||||
REG_WRITE_WITH_AUX(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
|
||||
((adjusted_mode->crtc_vtotal - 1) << 16), i);
|
||||
REG_WRITE_WITH_AUX(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
|
||||
((adjusted_mode->crtc_hblank_end - 1) << 16), i);
|
||||
REG_WRITE_WITH_AUX(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
|
||||
((adjusted_mode->crtc_hsync_end - 1) << 16), i);
|
||||
REG_WRITE_WITH_AUX(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
|
||||
((adjusted_mode->crtc_vblank_end - 1) << 16), i);
|
||||
REG_WRITE_WITH_AUX(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
|
||||
((adjusted_mode->crtc_vsync_end - 1) << 16), i);
|
||||
}
|
||||
}
|
||||
|
||||
/* Flush the plane changes */
|
||||
@ -418,21 +503,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
|
||||
if (is_mipi)
|
||||
goto oaktrail_crtc_mode_set_exit;
|
||||
|
||||
refclk = dev_priv->core_freq * 1000;
|
||||
|
||||
dpll = 0; /*BIT16 = 0 for 100MHz reference */
|
||||
|
||||
ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
|
||||
refclk = is_sdvo ? 96000 : dev_priv->core_freq * 1000;
|
||||
limit = mrst_limit(crtc, refclk);
|
||||
ok = limit->find_pll(limit, crtc, adjusted_mode->clock,
|
||||
refclk, &clock);
|
||||
|
||||
if (!ok) {
|
||||
dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
|
||||
} else {
|
||||
dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
|
||||
"m = %x, p1 = %x.\n", clock.dot, clock.m,
|
||||
clock.p1);
|
||||
if (is_sdvo) {
|
||||
/* Convert calculated values to register values */
|
||||
clock.p1 = (1L << (clock.p1 - 1));
|
||||
clock.m -= 2;
|
||||
clock.n = (1L << (clock.n - 1));
|
||||
}
|
||||
|
||||
fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
|
||||
if (!ok)
|
||||
DRM_ERROR("Failed to find proper PLL settings");
|
||||
|
||||
mrst_print_pll(&clock);
|
||||
|
||||
if (is_sdvo)
|
||||
fp = clock.n << 16 | clock.m;
|
||||
else
|
||||
fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
|
||||
|
||||
dpll |= DPLL_VGA_MODE_DIS;
|
||||
|
||||
@ -456,38 +550,43 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
|
||||
|
||||
|
||||
/* compute bitmask from p1 value */
|
||||
dpll |= (1 << (clock.p1 - 2)) << 17;
|
||||
if (is_sdvo)
|
||||
dpll |= clock.p1 << 16; // dpll |= (1 << (clock.p1 - 1)) << 16;
|
||||
else
|
||||
dpll |= (1 << (clock.p1 - 2)) << 17;
|
||||
|
||||
dpll |= DPLL_VCO_ENABLE;
|
||||
|
||||
mrstPrintPll("chosen", &clock);
|
||||
|
||||
if (dpll & DPLL_VCO_ENABLE) {
|
||||
REG_WRITE(map->fp0, fp);
|
||||
REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
|
||||
REG_READ(map->dpll);
|
||||
/* Check the DPLLA lock bit PIPEACONF[29] */
|
||||
udelay(150);
|
||||
for (i = 0; i <= need_aux; i++) {
|
||||
REG_WRITE_WITH_AUX(map->fp0, fp, i);
|
||||
REG_WRITE_WITH_AUX(map->dpll, dpll & ~DPLL_VCO_ENABLE, i);
|
||||
REG_READ_WITH_AUX(map->dpll, i);
|
||||
/* Check the DPLLA lock bit PIPEACONF[29] */
|
||||
udelay(150);
|
||||
}
|
||||
}
|
||||
|
||||
REG_WRITE(map->fp0, fp);
|
||||
REG_WRITE(map->dpll, dpll);
|
||||
REG_READ(map->dpll);
|
||||
/* Wait for the clocks to stabilize. */
|
||||
udelay(150);
|
||||
for (i = 0; i <= need_aux; i++) {
|
||||
REG_WRITE_WITH_AUX(map->fp0, fp, i);
|
||||
REG_WRITE_WITH_AUX(map->dpll, dpll, i);
|
||||
REG_READ_WITH_AUX(map->dpll, i);
|
||||
/* Wait for the clocks to stabilize. */
|
||||
udelay(150);
|
||||
|
||||
/* write it again -- the BIOS does, after all */
|
||||
REG_WRITE(map->dpll, dpll);
|
||||
REG_READ(map->dpll);
|
||||
/* Wait for the clocks to stabilize. */
|
||||
udelay(150);
|
||||
/* write it again -- the BIOS does, after all */
|
||||
REG_WRITE_WITH_AUX(map->dpll, dpll, i);
|
||||
REG_READ_WITH_AUX(map->dpll, i);
|
||||
/* Wait for the clocks to stabilize. */
|
||||
udelay(150);
|
||||
|
||||
REG_WRITE(map->conf, pipeconf);
|
||||
REG_READ(map->conf);
|
||||
gma_wait_for_vblank(dev);
|
||||
REG_WRITE_WITH_AUX(map->conf, pipeconf, i);
|
||||
REG_READ_WITH_AUX(map->conf, i);
|
||||
gma_wait_for_vblank(dev);
|
||||
|
||||
REG_WRITE(map->cntr, dspcntr);
|
||||
gma_wait_for_vblank(dev);
|
||||
REG_WRITE_WITH_AUX(map->cntr, dspcntr, i);
|
||||
gma_wait_for_vblank(dev);
|
||||
}
|
||||
|
||||
oaktrail_crtc_mode_set_exit:
|
||||
gma_power_end(dev);
|
||||
@ -565,3 +664,9 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
|
||||
.commit = gma_crtc_commit,
|
||||
};
|
||||
|
||||
/* Not used yet */
|
||||
const struct gma_clock_funcs mrst_clock_funcs = {
|
||||
.clock = mrst_lvds_clock,
|
||||
.limit = mrst_limit,
|
||||
.pll_is_valid = gma_pll_is_valid,
|
||||
};
|
||||
|
@ -40,6 +40,9 @@ static int oaktrail_output_init(struct drm_device *dev)
|
||||
dev_err(dev->dev, "DSI is not supported\n");
|
||||
if (dev_priv->hdmi_priv)
|
||||
oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
|
||||
|
||||
psb_intel_sdvo_init(dev, SDVOB);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -526,6 +529,7 @@ static int oaktrail_chip_setup(struct drm_device *dev)
|
||||
psb_intel_opregion_init(dev);
|
||||
psb_intel_init_bios(dev);
|
||||
}
|
||||
gma_intel_setup_gmbus(dev);
|
||||
oaktrail_hdmi_setup(dev);
|
||||
return 0;
|
||||
}
|
||||
@ -534,6 +538,7 @@ static void oaktrail_teardown(struct drm_device *dev)
|
||||
{
|
||||
struct drm_psb_private *dev_priv = dev->dev_private;
|
||||
|
||||
gma_intel_teardown_gmbus(dev);
|
||||
oaktrail_hdmi_teardown(dev);
|
||||
if (!dev_priv->has_gct)
|
||||
psb_intel_destroy_bios(dev);
|
||||
@ -546,6 +551,7 @@ const struct psb_ops oaktrail_chip_ops = {
|
||||
.crtcs = 2,
|
||||
.hdmi_mask = (1 << 1),
|
||||
.lvds_mask = (1 << 0),
|
||||
.sdvo_mask = (1 << 1),
|
||||
.cursor_needs_phys = 0,
|
||||
.sgx_offset = MRST_SGX_OFFSET,
|
||||
|
||||
|
@ -218,30 +218,6 @@ static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
|
||||
.commit = oaktrail_lvds_commit,
|
||||
};
|
||||
|
||||
static struct drm_display_mode lvds_configuration_modes[] = {
|
||||
/* hard coded fixed mode for TPO LTPS LPJ040K001A */
|
||||
{ DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
|
||||
846, 1056, 0, 480, 489, 491, 525, 0, 0) },
|
||||
/* hard coded fixed mode for LVDS 800x480 */
|
||||
{ DRM_MODE("800x480", DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
|
||||
802, 1024, 0, 480, 481, 482, 525, 0, 0) },
|
||||
/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
|
||||
{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
|
||||
1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
|
||||
/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
|
||||
{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
|
||||
1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
|
||||
/* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
|
||||
{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
|
||||
1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
|
||||
/* hard coded fixed mode for LVDS 1024x768 */
|
||||
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
|
||||
1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
|
||||
/* hard coded fixed mode for LVDS 1366x768 */
|
||||
{ DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
|
||||
1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
|
||||
};
|
||||
|
||||
/* Returns the panel fixed mode from configuration. */
|
||||
|
||||
static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
|
||||
@ -303,10 +279,10 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
|
||||
mode_dev->panel_fixed_mode =
|
||||
drm_mode_duplicate(dev,
|
||||
dev_priv->lfp_lvds_vbt_mode);
|
||||
/* Then guess */
|
||||
|
||||
/* If we still got no mode then bail */
|
||||
if (mode_dev->panel_fixed_mode == NULL)
|
||||
mode_dev->panel_fixed_mode
|
||||
= drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
|
||||
return;
|
||||
|
||||
drm_mode_set_name(mode_dev->panel_fixed_mode);
|
||||
drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
|
||||
|
@ -373,6 +373,7 @@ const struct psb_ops psb_chip_ops = {
|
||||
.crtcs = 2,
|
||||
.hdmi_mask = (1 << 0),
|
||||
.lvds_mask = (1 << 1),
|
||||
.sdvo_mask = (1 << 0),
|
||||
.cursor_needs_phys = 1,
|
||||
.sgx_offset = PSB_SGX_OFFSET,
|
||||
.chip_setup = psb_chip_setup,
|
||||
|
@ -251,6 +251,12 @@ static int psb_driver_unload(struct drm_device *dev)
|
||||
iounmap(dev_priv->sgx_reg);
|
||||
dev_priv->sgx_reg = NULL;
|
||||
}
|
||||
if (dev_priv->aux_reg) {
|
||||
iounmap(dev_priv->aux_reg);
|
||||
dev_priv->aux_reg = NULL;
|
||||
}
|
||||
if (dev_priv->aux_pdev)
|
||||
pci_dev_put(dev_priv->aux_pdev);
|
||||
|
||||
/* Destroy VBT data */
|
||||
psb_intel_destroy_bios(dev);
|
||||
@ -266,7 +272,7 @@ static int psb_driver_unload(struct drm_device *dev)
|
||||
static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||
{
|
||||
struct drm_psb_private *dev_priv;
|
||||
unsigned long resource_start;
|
||||
unsigned long resource_start, resource_len;
|
||||
unsigned long irqflags;
|
||||
int ret = -ENOMEM;
|
||||
struct drm_connector *connector;
|
||||
@ -296,6 +302,30 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||
if (!dev_priv->sgx_reg)
|
||||
goto out_err;
|
||||
|
||||
if (IS_MRST(dev)) {
|
||||
dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
|
||||
|
||||
if (dev_priv->aux_pdev) {
|
||||
resource_start = pci_resource_start(dev_priv->aux_pdev,
|
||||
PSB_AUX_RESOURCE);
|
||||
resource_len = pci_resource_len(dev_priv->aux_pdev,
|
||||
PSB_AUX_RESOURCE);
|
||||
dev_priv->aux_reg = ioremap_nocache(resource_start,
|
||||
resource_len);
|
||||
if (!dev_priv->aux_reg)
|
||||
goto out_err;
|
||||
|
||||
DRM_DEBUG_KMS("Found aux vdc");
|
||||
} else {
|
||||
/* Couldn't find the aux vdc so map to primary vdc */
|
||||
dev_priv->aux_reg = dev_priv->vdc_reg;
|
||||
DRM_DEBUG_KMS("Couldn't find aux pci device");
|
||||
}
|
||||
dev_priv->gmbus_reg = dev_priv->aux_reg;
|
||||
} else {
|
||||
dev_priv->gmbus_reg = dev_priv->vdc_reg;
|
||||
}
|
||||
|
||||
psb_intel_opregion_setup(dev);
|
||||
|
||||
ret = dev_priv->ops->chip_setup(dev);
|
||||
@ -449,7 +479,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
|
||||
obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
|
||||
if (!obj) {
|
||||
dev_dbg(dev->dev, "Invalid Connector object.\n");
|
||||
return -EINVAL;
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
connector = obj_to_connector(obj);
|
||||
@ -491,7 +521,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
|
||||
obj = drm_mode_object_find(dev, obj_id,
|
||||
DRM_MODE_OBJECT_CONNECTOR);
|
||||
if (!obj) {
|
||||
ret = -EINVAL;
|
||||
ret = -ENOENT;
|
||||
goto mode_op_out;
|
||||
}
|
||||
|
||||
|
@ -45,7 +45,7 @@ enum {
};

#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
#define IS_MRST(dev) (((dev)->pdev->device & 0xfffc) == 0x4100)
#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)

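Widening the IS_MRST() mask from 0xfffc to 0xfff0 grows the matched device-ID range from 0x4100-0x4103 to 0x4100-0x410f, which lets additional Oaktrail/Moorestown SKUs take the IS_MRST()-gated paths (aux VDC mapping, SDVO register mirroring) added elsewhere in this series. A quick check of the masking arithmetic, using a hypothetical device ID that only the new mask catches:

/*
 * Hypothetical PCI device ID 0x4108 (for illustration only):
 *   0x4108 & 0xfffc = 0x4108  ->  old IS_MRST() evaluates to false
 *   0x4108 & 0xfff0 = 0x4100  ->  new IS_MRST() evaluates to true
 */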
@ -75,6 +75,7 @@ enum {
|
||||
* PCI resource identifiers
|
||||
*/
|
||||
#define PSB_MMIO_RESOURCE 0
|
||||
#define PSB_AUX_RESOURCE 0
|
||||
#define PSB_GATT_RESOURCE 2
|
||||
#define PSB_GTT_RESOURCE 3
|
||||
/*
|
||||
@ -455,6 +456,7 @@ struct psb_ops;
|
||||
|
||||
struct drm_psb_private {
|
||||
struct drm_device *dev;
|
||||
struct pci_dev *aux_pdev; /* Currently only used by mrst */
|
||||
const struct psb_ops *ops;
|
||||
const struct psb_offset *regmap;
|
||||
|
||||
@ -486,6 +488,7 @@ struct drm_psb_private {
|
||||
|
||||
uint8_t __iomem *sgx_reg;
|
||||
uint8_t __iomem *vdc_reg;
|
||||
uint8_t __iomem *aux_reg; /* Auxiliary vdc pipe regs */
|
||||
uint32_t gatt_free_offset;
|
||||
|
||||
/*
|
||||
@ -532,6 +535,7 @@ struct drm_psb_private {
|
||||
|
||||
/* gmbus */
|
||||
struct intel_gmbus *gmbus;
|
||||
uint8_t __iomem *gmbus_reg;
|
||||
|
||||
/* Used by SDVO */
|
||||
int crt_ddc_pin;
|
||||
@ -672,6 +676,7 @@ struct psb_ops {
|
||||
int sgx_offset; /* Base offset of SGX device */
|
||||
int hdmi_mask; /* Mask of HDMI CRTCs */
|
||||
int lvds_mask; /* Mask of LVDS CRTCs */
|
||||
int sdvo_mask; /* Mask of SDVO CRTCs */
|
||||
int cursor_needs_phys; /* If cursor base reg need physical address */
|
||||
|
||||
/* Sub functions */
|
||||
@ -927,16 +932,58 @@ static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
return ioread32(dev_priv->vdc_reg + reg);
}

static inline uint32_t REGISTER_READ_AUX(struct drm_device *dev, uint32_t reg)
{
struct drm_psb_private *dev_priv = dev->dev_private;
return ioread32(dev_priv->aux_reg + reg);
}

#define REG_READ(reg) REGISTER_READ(dev, (reg))
#define REG_READ_AUX(reg) REGISTER_READ_AUX(dev, (reg))

/* Useful for post reads */
static inline uint32_t REGISTER_READ_WITH_AUX(struct drm_device *dev,
uint32_t reg, int aux)
{
uint32_t val;

if (aux)
val = REG_READ_AUX(reg);
else
val = REG_READ(reg);

return val;
}

#define REG_READ_WITH_AUX(reg, aux) REGISTER_READ_WITH_AUX(dev, (reg), (aux))

static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
uint32_t val)
uint32_t val)
{
struct drm_psb_private *dev_priv = dev->dev_private;
iowrite32((val), dev_priv->vdc_reg + (reg));
}

static inline void REGISTER_WRITE_AUX(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
struct drm_psb_private *dev_priv = dev->dev_private;
iowrite32((val), dev_priv->aux_reg + (reg));
}

#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
#define REG_WRITE_AUX(reg, val) REGISTER_WRITE_AUX(dev, (reg), (val))

static inline void REGISTER_WRITE_WITH_AUX(struct drm_device *dev, uint32_t reg,
uint32_t val, int aux)
{
if (aux)
REG_WRITE_AUX(reg, val);
else
REG_WRITE(reg, val);
}

#define REG_WRITE_WITH_AUX(reg, val, aux) REGISTER_WRITE_WITH_AUX(dev, (reg), (val), (aux))

static inline void REGISTER_WRITE16(struct drm_device *dev,
uint32_t reg, uint32_t val)
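The REG_*_WITH_AUX() helpers above are what let Oaktrail code program the primary VDC and the auxiliary VDC (mapped in psb_driver_load() when IS_MRST() holds) from a single call site: callers loop `for (i = 0; i <= need_aux; i++)`, as oaktrail_crtc_mode_set() does earlier in this diff. A minimal sketch of that pattern; the helper name is hypothetical, and since the macros expand against a local variable named `dev`, it must be in scope:

/* Hypothetical helper mirroring the loop used in oaktrail_crtc_mode_set(). */
static void oaktrail_write_pipe_src(struct drm_device *dev,
				    const struct psb_offset *map,
				    const struct drm_display_mode *mode,
				    int need_aux)
{
	int i;

	/* i == 0 hits the primary VDC, i == 1 the aux VDC (only when present). */
	for (i = 0; i <= need_aux; i++)
		REG_WRITE_WITH_AUX(map->src,
				   ((mode->crtc_hdisplay - 1) << 16) |
				   (mode->crtc_vdisplay - 1), i);
}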
@ -572,7 +572,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
|
||||
|
||||
if (!drmmode_obj) {
|
||||
dev_err(dev->dev, "no such CRTC id\n");
|
||||
return -EINVAL;
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
|
||||
|
@ -228,24 +228,26 @@ static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u3
|
||||
{
|
||||
struct drm_device *dev = psb_intel_sdvo->base.base.dev;
|
||||
u32 bval = val, cval = val;
|
||||
int i;
|
||||
int i, j;
|
||||
int need_aux = IS_MRST(dev) ? 1 : 0;
|
||||
|
||||
if (psb_intel_sdvo->sdvo_reg == SDVOB) {
|
||||
cval = REG_READ(SDVOC);
|
||||
} else {
|
||||
bval = REG_READ(SDVOB);
|
||||
}
|
||||
/*
|
||||
* Write the registers twice for luck. Sometimes,
|
||||
* writing them only once doesn't appear to 'stick'.
|
||||
* The BIOS does this too. Yay, magic
|
||||
*/
|
||||
for (i = 0; i < 2; i++)
|
||||
{
|
||||
REG_WRITE(SDVOB, bval);
|
||||
REG_READ(SDVOB);
|
||||
REG_WRITE(SDVOC, cval);
|
||||
REG_READ(SDVOC);
|
||||
for (j = 0; j <= need_aux; j++) {
|
||||
if (psb_intel_sdvo->sdvo_reg == SDVOB)
|
||||
cval = REG_READ_WITH_AUX(SDVOC, j);
|
||||
else
|
||||
bval = REG_READ_WITH_AUX(SDVOB, j);
|
||||
|
||||
/*
|
||||
* Write the registers twice for luck. Sometimes,
|
||||
* writing them only once doesn't appear to 'stick'.
|
||||
* The BIOS does this too. Yay, magic
|
||||
*/
|
||||
for (i = 0; i < 2; i++) {
|
||||
REG_WRITE_WITH_AUX(SDVOB, bval, j);
|
||||
REG_READ_WITH_AUX(SDVOB, j);
|
||||
REG_WRITE_WITH_AUX(SDVOC, cval, j);
|
||||
REG_READ_WITH_AUX(SDVOC, j);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -995,6 +997,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
|
||||
struct psb_intel_sdvo_dtd input_dtd;
|
||||
int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
|
||||
int rate;
|
||||
int need_aux = IS_MRST(dev) ? 1 : 0;
|
||||
|
||||
if (!mode)
|
||||
return;
|
||||
@ -1060,7 +1063,11 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
|
||||
return;
|
||||
|
||||
/* Set the SDVO control regs. */
|
||||
sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
|
||||
if (need_aux)
|
||||
sdvox = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
|
||||
else
|
||||
sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
|
||||
|
||||
switch (psb_intel_sdvo->sdvo_reg) {
|
||||
case SDVOB:
|
||||
sdvox &= SDVOB_PRESERVE_MASK;
|
||||
@ -1090,6 +1097,8 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
|
||||
u32 temp;
|
||||
int i;
|
||||
int need_aux = IS_MRST(dev) ? 1 : 0;
|
||||
|
||||
switch (mode) {
|
||||
case DRM_MODE_DPMS_ON:
|
||||
@ -1108,19 +1117,27 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
|
||||
psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
|
||||
|
||||
if (mode == DRM_MODE_DPMS_OFF) {
|
||||
temp = REG_READ(psb_intel_sdvo->sdvo_reg);
|
||||
if (need_aux)
|
||||
temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
|
||||
else
|
||||
temp = REG_READ(psb_intel_sdvo->sdvo_reg);
|
||||
|
||||
if ((temp & SDVO_ENABLE) != 0) {
|
||||
psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
bool input1, input2;
|
||||
int i;
|
||||
u8 status;
|
||||
|
||||
temp = REG_READ(psb_intel_sdvo->sdvo_reg);
|
||||
if (need_aux)
|
||||
temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
|
||||
else
|
||||
temp = REG_READ(psb_intel_sdvo->sdvo_reg);
|
||||
|
||||
if ((temp & SDVO_ENABLE) == 0)
|
||||
psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
|
||||
|
||||
for (i = 0; i < 2; i++)
|
||||
gma_wait_for_vblank(dev);
|
||||
|
||||
|
@ -17,6 +17,7 @@
|
||||
|
||||
|
||||
|
||||
#include <linux/hdmi.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
@ -549,6 +550,8 @@ tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
|
||||
buf[HB(0)] = 0x82;
|
||||
buf[HB(1)] = 0x02;
|
||||
buf[HB(2)] = 13;
|
||||
buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
|
||||
buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
|
||||
buf[PB(4)] = drm_match_cea_mode(mode);
|
||||
|
||||
tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
|
||||
|
@ -586,7 +586,53 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (IS_VALLEYVIEW(dev)) {
|
||||
if (INTEL_INFO(dev)->gen >= 8) {
|
||||
int i;
|
||||
seq_printf(m, "Master Interrupt Control:\t%08x\n",
|
||||
I915_READ(GEN8_MASTER_IRQ));
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
|
||||
i, I915_READ(GEN8_GT_IMR(i)));
|
||||
seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
|
||||
i, I915_READ(GEN8_GT_IIR(i)));
|
||||
seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
|
||||
i, I915_READ(GEN8_GT_IER(i)));
|
||||
}
|
||||
|
||||
for_each_pipe(i) {
|
||||
seq_printf(m, "Pipe %c IMR:\t%08x\n",
|
||||
pipe_name(i),
|
||||
I915_READ(GEN8_DE_PIPE_IMR(i)));
|
||||
seq_printf(m, "Pipe %c IIR:\t%08x\n",
|
||||
pipe_name(i),
|
||||
I915_READ(GEN8_DE_PIPE_IIR(i)));
|
||||
seq_printf(m, "Pipe %c IER:\t%08x\n",
|
||||
pipe_name(i),
|
||||
I915_READ(GEN8_DE_PIPE_IER(i)));
|
||||
}
|
||||
|
||||
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
|
||||
I915_READ(GEN8_DE_PORT_IMR));
|
||||
seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
|
||||
I915_READ(GEN8_DE_PORT_IIR));
|
||||
seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
|
||||
I915_READ(GEN8_DE_PORT_IER));
|
||||
|
||||
seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
|
||||
I915_READ(GEN8_DE_MISC_IMR));
|
||||
seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
|
||||
I915_READ(GEN8_DE_MISC_IIR));
|
||||
seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
|
||||
I915_READ(GEN8_DE_MISC_IER));
|
||||
|
||||
seq_printf(m, "PCU interrupt mask:\t%08x\n",
|
||||
I915_READ(GEN8_PCU_IMR));
|
||||
seq_printf(m, "PCU interrupt identity:\t%08x\n",
|
||||
I915_READ(GEN8_PCU_IIR));
|
||||
seq_printf(m, "PCU interrupt enable:\t%08x\n",
|
||||
I915_READ(GEN8_PCU_IER));
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
seq_printf(m, "Display IER:\t%08x\n",
|
||||
I915_READ(VLV_IER));
|
||||
seq_printf(m, "Display IIR:\t%08x\n",
|
||||
@ -658,7 +704,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
|
||||
seq_printf(m, "Interrupts received: %d\n",
|
||||
atomic_read(&dev_priv->irq_received));
|
||||
for_each_ring(ring, dev_priv, i) {
|
||||
if (IS_GEN6(dev) || IS_GEN7(dev)) {
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
seq_printf(m,
|
||||
"Graphics Interrupt mask (%s): %08x\n",
|
||||
ring->name, I915_READ_IMR(ring));
|
||||
@ -1576,7 +1622,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
|
||||
I915_READ16(C0DRB3));
|
||||
seq_printf(m, "C1DRB3 = 0x%04x\n",
|
||||
I915_READ16(C1DRB3));
|
||||
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
|
||||
} else if (INTEL_INFO(dev)->gen >= 6) {
|
||||
seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
|
||||
I915_READ(MAD_DIMM_C0));
|
||||
seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
|
||||
@ -1585,8 +1631,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
|
||||
I915_READ(MAD_DIMM_C2));
|
||||
seq_printf(m, "TILECTL = 0x%08x\n",
|
||||
I915_READ(TILECTL));
|
||||
seq_printf(m, "ARB_MODE = 0x%08x\n",
|
||||
I915_READ(ARB_MODE));
|
||||
if (IS_GEN8(dev))
|
||||
seq_printf(m, "GAMTARBMODE = 0x%08x\n",
|
||||
I915_READ(GAMTARBMODE));
|
||||
else
|
||||
seq_printf(m, "ARB_MODE = 0x%08x\n",
|
||||
I915_READ(ARB_MODE));
|
||||
seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
|
||||
I915_READ(DISP_ARB_CTL));
|
||||
}
|
||||
@ -1595,18 +1645,37 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_ppgtt_info(struct seq_file *m, void *data)
|
||||
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring;
|
||||
int i, ret;
|
||||
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
|
||||
int unused, i;
|
||||
|
||||
if (!ppgtt)
|
||||
return;
|
||||
|
||||
seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
|
||||
seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
|
||||
for_each_ring(ring, dev_priv, unused) {
|
||||
seq_printf(m, "%s\n", ring->name);
|
||||
for (i = 0; i < 4; i++) {
|
||||
u32 offset = 0x270 + i * 8;
|
||||
u64 pdp = I915_READ(ring->mmio_base + offset + 4);
|
||||
pdp <<= 32;
|
||||
pdp |= I915_READ(ring->mmio_base + offset);
|
||||
for (i = 0; i < 4; i++)
|
||||
seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring;
|
||||
int i;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (INTEL_INFO(dev)->gen == 6)
|
||||
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
|
||||
|
||||
@ -1625,6 +1694,22 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
|
||||
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
|
||||
}
|
||||
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
|
||||
}
|
||||
|
||||
static int i915_ppgtt_info(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
|
||||
int ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 8)
|
||||
gen8_ppgtt_info(m, dev);
|
||||
else if (INTEL_INFO(dev)->gen >= 6)
|
||||
gen6_ppgtt_info(m, dev);
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
@ -2955,7 +3040,7 @@ static int i915_debugfs_create(struct dentry *root,
|
||||
return drm_add_fake_info_node(minor, ent, fops);
|
||||
}
|
||||
|
||||
static struct drm_info_list i915_debugfs_list[] = {
|
||||
static const struct drm_info_list i915_debugfs_list[] = {
|
||||
{"i915_capabilities", i915_capabilities, 0},
|
||||
{"i915_gem_objects", i915_gem_object_info, 0},
|
||||
{"i915_gem_gtt", i915_gem_gtt_info, 0},
|
||||
@ -2997,7 +3082,7 @@ static struct drm_info_list i915_debugfs_list[] = {
|
||||
};
|
||||
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
|
||||
|
||||
static struct i915_debugfs_files {
|
||||
static const struct i915_debugfs_files {
|
||||
const char *name;
|
||||
const struct file_operations *fops;
|
||||
} i915_debugfs_files[] = {
|
||||
|
@ -1486,7 +1486,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
|
||||
spin_lock_init(&dev_priv->irq_lock);
|
||||
spin_lock_init(&dev_priv->gpu_error.lock);
|
||||
spin_lock_init(&dev_priv->backlight.lock);
|
||||
spin_lock_init(&dev_priv->backlight_lock);
|
||||
spin_lock_init(&dev_priv->uncore.lock);
|
||||
spin_lock_init(&dev_priv->mm.object_stat_lock);
|
||||
mutex_init(&dev_priv->dpio_lock);
|
||||
|
@ -335,6 +335,24 @@ static const struct intel_device_info intel_haswell_m_info = {
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_broadwell_d_info = {
|
||||
.is_preliminary = 1,
|
||||
.gen = 8, .num_pipes = 3,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
|
||||
.has_llc = 1,
|
||||
.has_ddi = 1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_broadwell_m_info = {
|
||||
.is_preliminary = 1,
|
||||
.gen = 8, .is_mobile = 1, .num_pipes = 3,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
|
||||
.has_llc = 1,
|
||||
.has_ddi = 1,
|
||||
};
|
||||
|
||||
/*
|
||||
* Make sure any device matches here are from most specific to most
|
||||
* general. For example, since the Quanta match is based on the subsystem
|
||||
@ -366,7 +384,9 @@ static const struct intel_device_info intel_haswell_m_info = {
|
||||
INTEL_HSW_D_IDS(&intel_haswell_d_info), \
|
||||
INTEL_HSW_M_IDS(&intel_haswell_m_info), \
|
||||
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
|
||||
INTEL_VLV_D_IDS(&intel_valleyview_d_info)
|
||||
INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
|
||||
INTEL_BDW_M_IDS(&intel_broadwell_m_info), \
|
||||
INTEL_BDW_D_IDS(&intel_broadwell_d_info)
|
||||
|
||||
static const struct pci_device_id pciidlist[] = { /* aka */
|
||||
INTEL_PCI_IDS,
|
||||
@ -427,6 +447,12 @@ void intel_detect_pch(struct drm_device *dev)
|
||||
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
|
||||
WARN_ON(!IS_HASWELL(dev));
|
||||
WARN_ON(IS_ULT(dev));
|
||||
} else if (IS_BROADWELL(dev)) {
|
||||
dev_priv->pch_type = PCH_LPT;
|
||||
dev_priv->pch_id =
|
||||
INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
|
||||
DRM_DEBUG_KMS("This is Broadwell, assuming "
|
||||
"LynxPoint LP PCH\n");
|
||||
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_type = PCH_LPT;
|
||||
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
|
||||
@ -451,6 +477,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
|
||||
if (INTEL_INFO(dev)->gen < 6)
|
||||
return 0;
|
||||
|
||||
/* Until we get further testing... */
|
||||
if (IS_GEN8(dev)) {
|
||||
WARN_ON(!i915_preliminary_hw_support);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (i915_semaphores >= 0)
|
||||
return i915_semaphores;
|
||||
|
||||
|
@ -54,6 +54,7 @@
|
||||
#define DRIVER_DATE "20080730"
|
||||
|
||||
enum pipe {
|
||||
INVALID_PIPE = -1,
|
||||
PIPE_A = 0,
|
||||
PIPE_B,
|
||||
PIPE_C,
|
||||
@ -129,6 +130,10 @@ enum intel_display_power_domain {
|
||||
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
|
||||
BIT(POWER_DOMAIN_PIPE_A) | \
|
||||
BIT(POWER_DOMAIN_TRANSCODER_EDP))
|
||||
#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
|
||||
BIT(POWER_DOMAIN_PIPE_A) | \
|
||||
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
|
||||
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
|
||||
|
||||
enum hpd_pin {
|
||||
HPD_NONE = 0,
|
||||
@ -254,6 +259,7 @@ struct intel_opregion {
|
||||
struct opregion_asle __iomem *asle;
|
||||
void __iomem *vbt;
|
||||
u32 __iomem *lid_state;
|
||||
struct work_struct asle_work;
|
||||
};
|
||||
#define OPREGION_SIZE (8*1024)
|
||||
|
||||
@ -357,6 +363,7 @@ struct drm_i915_error_state {
|
||||
enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
|
||||
};
|
||||
|
||||
struct intel_connector;
|
||||
struct intel_crtc_config;
|
||||
struct intel_crtc;
|
||||
struct intel_limit;
|
||||
@ -419,6 +426,13 @@ struct drm_i915_display_funcs {
|
||||
/* render clock increase/decrease */
|
||||
/* display clock increase/decrease */
|
||||
/* pll clock increase/decrease */
|
||||
|
||||
int (*setup_backlight)(struct intel_connector *connector);
|
||||
uint32_t (*get_backlight)(struct intel_connector *connector);
|
||||
void (*set_backlight)(struct intel_connector *connector,
|
||||
uint32_t level);
|
||||
void (*disable_backlight)(struct intel_connector *connector);
|
||||
void (*enable_backlight)(struct intel_connector *connector);
|
||||
};
|
||||
|
||||
struct intel_uncore_funcs {
|
||||
@ -585,10 +599,21 @@ struct i915_gtt {
|
||||
struct i915_hw_ppgtt {
|
||||
struct i915_address_space base;
|
||||
unsigned num_pd_entries;
|
||||
struct page **pt_pages;
|
||||
uint32_t pd_offset;
|
||||
dma_addr_t *pt_dma_addr;
|
||||
|
||||
union {
|
||||
struct page **pt_pages;
|
||||
struct page *gen8_pt_pages;
|
||||
};
|
||||
struct page *pd_pages;
|
||||
int num_pd_pages;
|
||||
int num_pt_pages;
|
||||
union {
|
||||
uint32_t pd_offset;
|
||||
dma_addr_t pd_dma_addr[4];
|
||||
};
|
||||
union {
|
||||
dma_addr_t *pt_dma_addr;
|
||||
dma_addr_t *gen8_pt_dma_addr[4];
|
||||
};
|
||||
int (*enable)(struct drm_device *dev);
|
||||
};
|
||||
|
||||
@ -703,7 +728,6 @@ enum intel_sbi_destination {
|
||||
#define QUIRK_PIPEA_FORCE (1<<0)
|
||||
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
|
||||
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
|
||||
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
|
||||
|
||||
struct intel_fbdev;
|
||||
struct intel_fbc_work;
|
||||
@ -755,6 +779,7 @@ struct i915_suspend_saved_registers {
|
||||
u32 saveBLC_HIST_CTL;
|
||||
u32 saveBLC_PWM_CTL;
|
||||
u32 saveBLC_PWM_CTL2;
|
||||
u32 saveBLC_HIST_CTL_B;
|
||||
u32 saveBLC_CPU_PWM_CTL;
|
||||
u32 saveBLC_CPU_PWM_CTL2;
|
||||
u32 saveFPB0;
|
||||
@ -1325,7 +1350,10 @@ typedef struct drm_i915_private {
|
||||
struct mutex dpio_lock;
|
||||
|
||||
/** Cached value of IMR to avoid reads in updating the bitfield */
|
||||
u32 irq_mask;
|
||||
union {
|
||||
u32 irq_mask;
|
||||
u32 de_irq_mask[I915_MAX_PIPES];
|
||||
};
|
||||
u32 gt_irq_mask;
|
||||
u32 pm_irq_mask;
|
||||
|
||||
@ -1353,13 +1381,8 @@ typedef struct drm_i915_private {
|
||||
struct intel_overlay *overlay;
|
||||
unsigned int sprite_scaling_enabled;
|
||||
|
||||
/* backlight */
|
||||
struct {
|
||||
int level;
|
||||
bool enabled;
|
||||
spinlock_t lock; /* bl registers and the above bl fields */
|
||||
struct backlight_device *device;
|
||||
} backlight;
|
||||
/* backlight registers and fields in struct intel_panel */
|
||||
spinlock_t backlight_lock;
|
||||
|
||||
/* LVDS info */
|
||||
bool no_aux_handshake;
|
||||
@ -1736,11 +1759,17 @@ struct drm_i915_file_private {
|
||||
(dev)->pdev->device == 0x010A)
|
||||
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
|
||||
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
|
||||
#define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8)
|
||||
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
|
||||
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
|
||||
((dev)->pdev->device & 0xFF00) == 0x0C00)
|
||||
#define IS_ULT(dev) (IS_HASWELL(dev) && \
|
||||
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
|
||||
(((dev)->pdev->device & 0xf) == 0x2 || \
|
||||
((dev)->pdev->device & 0xf) == 0x6 || \
|
||||
((dev)->pdev->device & 0xf) == 0xe))
|
||||
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
|
||||
((dev)->pdev->device & 0xFF00) == 0x0A00)
|
||||
#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
|
||||
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
|
||||
((dev)->pdev->device & 0x00F0) == 0x0020)
|
||||
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
|
||||
@ -1757,6 +1786,7 @@ struct drm_i915_file_private {
|
||||
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
|
||||
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
|
||||
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
|
||||
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
|
||||
|
||||
#define RENDER_RING (1<<RCS)
|
||||
#define BSD_RING (1<<VCS)
|
||||
@ -1793,12 +1823,12 @@ struct drm_i915_file_private {
|
||||
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
|
||||
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
|
||||
|
||||
#define HAS_IPS(dev) (IS_ULT(dev))
|
||||
#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
|
||||
|
||||
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
|
||||
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
|
||||
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
|
||||
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
|
||||
#define HAS_PSR(dev) (IS_HASWELL(dev))
|
||||
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
|
||||
|
||||
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
|
||||
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
|
||||
|
@ -2954,6 +2954,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
|
||||
obj->stride, obj->tiling_mode);
|
||||
|
||||
switch (INTEL_INFO(dev)->gen) {
|
||||
case 8:
|
||||
case 7:
|
||||
case 6:
|
||||
case 5:
|
||||
@ -4361,6 +4362,8 @@ void i915_gem_init_swizzling(struct drm_device *dev)
|
||||
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
|
||||
else if (IS_GEN7(dev))
|
||||
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
|
||||
else if (IS_GEN8(dev))
|
||||
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
|
||||
else
|
||||
BUG();
|
||||
}
|
||||
|
@ -117,6 +117,9 @@ static int get_context_size(struct drm_device *dev)
|
||||
else
|
||||
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
|
||||
break;
|
||||
case 8:
|
||||
ret = GEN8_CXT_TOTAL_SIZE;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
@ -212,6 +212,7 @@ static int
|
||||
relocate_entry_cpu(struct drm_i915_gem_object *obj,
|
||||
struct drm_i915_gem_relocation_entry *reloc)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
uint32_t page_offset = offset_in_page(reloc->offset);
|
||||
char *vaddr;
|
||||
int ret = -EINVAL;
|
||||
@ -223,6 +224,19 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
|
||||
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
|
||||
reloc->offset >> PAGE_SHIFT));
|
||||
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 8) {
|
||||
page_offset = offset_in_page(page_offset + sizeof(uint32_t));
|
||||
|
||||
if (page_offset == 0) {
|
||||
kunmap_atomic(vaddr);
|
||||
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
|
||||
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
|
||||
}
|
||||
|
||||
*(uint32_t *)(vaddr + page_offset) = 0;
|
||||
}
|
||||
|
||||
kunmap_atomic(vaddr);
|
||||
|
||||
return 0;
|
||||
@ -253,6 +267,21 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
|
||||
reloc_entry = (uint32_t __iomem *)
|
||||
(reloc_page + offset_in_page(reloc->offset));
|
||||
iowrite32(reloc->delta, reloc_entry);
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 8) {
|
||||
reloc_entry += 1;
|
||||
|
||||
if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
|
||||
io_mapping_unmap_atomic(reloc_page);
|
||||
reloc_page = io_mapping_map_atomic_wc(
|
||||
dev_priv->gtt.mappable,
|
||||
reloc->offset + sizeof(uint32_t));
|
||||
reloc_entry = reloc_page;
|
||||
}
|
||||
|
||||
iowrite32(0, reloc_entry);
|
||||
}
|
||||
|
||||
io_mapping_unmap_atomic(reloc_page);
|
||||
|
||||
return 0;
|
||||
@ -323,7 +352,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
|
||||
return 0;
|
||||
|
||||
/* Check that the relocation address is valid... */
|
||||
if (unlikely(reloc->offset > obj->base.size - 4)) {
|
||||
if (unlikely(reloc->offset >
|
||||
obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
|
||||
DRM_DEBUG("Relocation beyond object bounds: "
|
||||
"obj %p target %d offset %d size %d.\n",
|
||||
obj, reloc->target_handle,
|
||||
@ -1116,8 +1146,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||
|
||||
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
|
||||
* batch" bit. Hence we need to pin secure batches into the global gtt.
|
||||
* hsw should have this fixed, but let's be paranoid and do it
|
||||
* unconditionally for now. */
|
||||
* hsw should have this fixed, but bdw mucks it up again. */
|
||||
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
|
||||
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
|
||||
|
||||
|
@ -30,6 +30,8 @@
|
||||
|
||||
#define GEN6_PPGTT_PD_ENTRIES 512
|
||||
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
|
||||
typedef uint64_t gen8_gtt_pte_t;
|
||||
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
|
||||
|
||||
/* PPGTT stuff */
|
||||
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
|
||||
@ -57,6 +59,41 @@
|
||||
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
|
||||
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
|
||||
|
||||
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
|
||||
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
|
||||
#define GEN8_LEGACY_PDPS 4
|
||||
|
||||
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
|
||||
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
|
||||
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
|
||||
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
|
||||
|
||||
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
|
||||
enum i915_cache_level level,
|
||||
bool valid)
|
||||
{
|
||||
gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
|
||||
pte |= addr;
|
||||
if (level != I915_CACHE_NONE)
|
||||
pte |= PPAT_CACHED_INDEX;
|
||||
else
|
||||
pte |= PPAT_UNCACHED_INDEX;
|
||||
return pte;
|
||||
}
|
||||
|
||||
static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
|
||||
dma_addr_t addr,
|
||||
enum i915_cache_level level)
|
||||
{
|
||||
gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
|
||||
pde |= addr;
|
||||
if (level != I915_CACHE_NONE)
|
||||
pde |= PPAT_CACHED_PDE_INDEX;
|
||||
else
|
||||
pde |= PPAT_UNCACHED_INDEX;
|
||||
return pde;
|
||||
}
|
||||
|
||||
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
|
||||
enum i915_cache_level level,
|
||||
bool valid)
|
||||
@ -158,6 +195,257 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
|
||||
return pte;
|
||||
}
|
||||
|
||||
/* Broadwell Page Directory Pointer Descriptors */
|
||||
static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
|
||||
uint64_t val)
|
||||
{
|
||||
int ret;
|
||||
|
||||
BUG_ON(entry >= 4);
|
||||
|
||||
ret = intel_ring_begin(ring, 6);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
|
||||
intel_ring_emit(ring, (u32)(val >> 32));
|
||||
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
|
||||
intel_ring_emit(ring, (u32)(val));
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gen8_ppgtt_enable(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring;
|
||||
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
|
||||
int i, j, ret;
|
||||
|
||||
/* bit of a hack to find the actual last used pd */
|
||||
int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
|
||||
|
||||
for_each_ring(ring, dev_priv, j) {
|
||||
I915_WRITE(RING_MODE_GEN7(ring),
|
||||
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
|
||||
}
|
||||
|
||||
for (i = used_pd - 1; i >= 0; i--) {
|
||||
dma_addr_t addr = ppgtt->pd_dma_addr[i];
|
||||
for_each_ring(ring, dev_priv, j) {
|
||||
ret = gen8_write_pdp(ring, i, addr);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
|
||||
unsigned first_entry,
|
||||
unsigned num_entries,
|
||||
bool use_scratch)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt =
|
||||
container_of(vm, struct i915_hw_ppgtt, base);
|
||||
gen8_gtt_pte_t *pt_vaddr, scratch_pte;
|
||||
unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
|
||||
unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
|
||||
unsigned last_pte, i;
|
||||
|
||||
scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
|
||||
I915_CACHE_LLC, use_scratch);
|
||||
|
||||
while (num_entries) {
|
||||
struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
|
||||
|
||||
last_pte = first_pte + num_entries;
|
||||
if (last_pte > GEN8_PTES_PER_PAGE)
|
||||
last_pte = GEN8_PTES_PER_PAGE;
|
||||
|
||||
pt_vaddr = kmap_atomic(page_table);
|
||||
|
||||
for (i = first_pte; i < last_pte; i++)
|
||||
pt_vaddr[i] = scratch_pte;
|
||||
|
||||
kunmap_atomic(pt_vaddr);
|
||||
|
||||
num_entries -= last_pte - first_pte;
|
||||
first_pte = 0;
|
||||
act_pt++;
|
||||
}
|
||||
}
|
||||
|
||||
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
|
||||
struct sg_table *pages,
|
||||
unsigned first_entry,
|
||||
enum i915_cache_level cache_level)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt =
|
||||
container_of(vm, struct i915_hw_ppgtt, base);
|
||||
gen8_gtt_pte_t *pt_vaddr;
|
||||
unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
|
||||
unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
|
||||
struct sg_page_iter sg_iter;
|
||||
|
||||
pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
|
||||
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
|
||||
dma_addr_t page_addr;
|
||||
|
||||
page_addr = sg_dma_address(sg_iter.sg) +
|
||||
(sg_iter.sg_pgoffset << PAGE_SHIFT);
|
||||
pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
|
||||
true);
|
||||
if (++act_pte == GEN8_PTES_PER_PAGE) {
|
||||
kunmap_atomic(pt_vaddr);
|
||||
act_pt++;
|
||||
pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
|
||||
act_pte = 0;
|
||||
|
||||
}
|
||||
}
|
||||
kunmap_atomic(pt_vaddr);
|
||||
}
|
||||
|
||||
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt =
|
||||
container_of(vm, struct i915_hw_ppgtt, base);
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < ppgtt->num_pd_pages ; i++) {
|
||||
if (ppgtt->pd_dma_addr[i]) {
|
||||
pci_unmap_page(ppgtt->base.dev->pdev,
|
||||
ppgtt->pd_dma_addr[i],
|
||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
|
||||
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
|
||||
if (addr)
|
||||
pci_unmap_page(ppgtt->base.dev->pdev,
|
||||
addr,
|
||||
PAGE_SIZE,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
}
|
||||
}
|
||||
kfree(ppgtt->gen8_pt_dma_addr[i]);
|
||||
}
|
||||
|
||||
__free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
|
||||
__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
|
||||
}
|
||||
|
||||
/**
* GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
* net effect resembling a 2-level page table in normal x86 terms. Each PDP
* represents 1GB of memory
* 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
*
* TODO: Do something with the size parameter
**/
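/*
 * Worked example of the sizing above (illustrative arithmetic, assuming the
 * 8-byte PTE/PDE definitions earlier in this file): GEN8_PDES_PER_PAGE =
 * GEN8_PTES_PER_PAGE = 4096 / 8 = 512, so each PDP covers
 * 512 * 512 * 4096 bytes = 1 GiB, and the four legacy PDPs together cover
 * 4 GiB of address space.
 */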
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
|
||||
{
|
||||
struct page *pt_pages;
|
||||
int i, j, ret = -ENOMEM;
|
||||
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
|
||||
const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
|
||||
|
||||
if (size % (1<<30))
|
||||
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
|
||||
|
||||
/* FIXME: split allocation into smaller pieces. For now we only ever do
|
||||
* this once, but with full PPGTT, the multiple contiguous allocations
|
||||
* will be bad.
|
||||
*/
|
||||
ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
|
||||
if (!ppgtt->pd_pages)
|
||||
return -ENOMEM;
|
||||
|
||||
pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
|
||||
if (!pt_pages) {
|
||||
__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ppgtt->gen8_pt_pages = pt_pages;
|
||||
ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
|
||||
ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
|
||||
ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
|
||||
ppgtt->enable = gen8_ppgtt_enable;
|
||||
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
|
||||
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
|
||||
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
|
||||
|
||||
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
|
||||
|
||||
/*
|
||||
* - Create a mapping for the page directories.
|
||||
* - For each page directory:
|
||||
* allocate space for page table mappings.
|
||||
* map each page table
|
||||
*/
|
||||
for (i = 0; i < max_pdp; i++) {
|
||||
dma_addr_t temp;
|
||||
temp = pci_map_page(ppgtt->base.dev->pdev,
|
||||
&ppgtt->pd_pages[i], 0,
|
||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||
if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
|
||||
goto err_out;
|
||||
|
||||
ppgtt->pd_dma_addr[i] = temp;
|
||||
|
||||
ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
|
||||
if (!ppgtt->gen8_pt_dma_addr[i])
|
||||
goto err_out;
|
||||
|
||||
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
|
||||
struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
|
||||
temp = pci_map_page(ppgtt->base.dev->pdev,
|
||||
p, 0, PAGE_SIZE,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
|
||||
goto err_out;
|
||||
|
||||
ppgtt->gen8_pt_dma_addr[i][j] = temp;
|
||||
}
|
||||
}
|
||||
|
||||
/* For now, the PPGTT helper functions all require that the PDEs are
|
||||
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
|
||||
* will never need to touch the PDEs again */
|
||||
for (i = 0; i < max_pdp; i++) {
|
||||
gen8_ppgtt_pde_t *pd_vaddr;
|
||||
pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
|
||||
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
|
||||
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
|
||||
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
|
||||
I915_CACHE_LLC);
|
||||
}
|
||||
kunmap_atomic(pd_vaddr);
|
||||
}
|
||||
|
||||
ppgtt->base.clear_range(&ppgtt->base, 0,
|
||||
ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
|
||||
true);
|
||||
|
||||
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
|
||||
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
|
||||
DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
|
||||
ppgtt->num_pt_pages,
|
||||
(ppgtt->num_pt_pages - num_pt_pages) +
|
||||
size % (1<<30));
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
ppgtt->base.cleanup(&ppgtt->base);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
|
||||
@ -410,6 +698,8 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
|
||||
|
||||
if (INTEL_INFO(dev)->gen < 8)
|
||||
ret = gen6_ppgtt_init(ppgtt);
|
||||
else if (IS_GEN8(dev))
|
||||
ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
|
||||
else
|
||||
BUG();
|
||||
|
||||
@ -573,6 +863,57 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
{
#ifdef writeq
writeq(pte, addr);
#else
iowrite32((u32)pte, addr);
iowrite32(pte >> 32, addr + 4);
#endif
}

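/*
 * Illustration with a hypothetical value: on a kernel without writeq, a
 * 64-bit PTE such as 0x0000000212345083 is emitted by the fallback path
 * above as two 32-bit I/O writes: 0x12345083 to addr, then 0x00000002 to
 * addr + 4.
 */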
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
|
||||
struct sg_table *st,
|
||||
unsigned int first_entry,
|
||||
enum i915_cache_level level)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vm->dev->dev_private;
|
||||
gen8_gtt_pte_t __iomem *gtt_entries =
|
||||
(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
|
||||
int i = 0;
|
||||
struct sg_page_iter sg_iter;
|
||||
dma_addr_t addr;
|
||||
|
||||
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
|
||||
addr = sg_dma_address(sg_iter.sg) +
|
||||
(sg_iter.sg_pgoffset << PAGE_SHIFT);
|
||||
gen8_set_pte(&gtt_entries[i],
|
||||
gen8_pte_encode(addr, level, true));
|
||||
i++;
|
||||
}
|
||||
|
||||
/*
|
||||
* XXX: This serves as a posting read to make sure that the PTE has
|
||||
* actually been updated. There is some concern that even though
|
||||
* registers and PTEs are within the same BAR that they are potentially
|
||||
* of NUMA access patterns. Therefore, even with the way we assume
|
||||
* hardware should work, we must keep this posting read for paranoia.
|
||||
*/
|
||||
if (i != 0)
|
||||
WARN_ON(readq(&gtt_entries[i-1])
|
||||
!= gen8_pte_encode(addr, level, true));
|
||||
|
||||
#if 0 /* TODO: Still needed on GEN8? */
|
||||
/* This next bit makes the above posting read even more important. We
|
||||
* want to flush the TLBs only after we're certain all the PTE updates
|
||||
* have finished.
|
||||
*/
|
||||
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
|
||||
POSTING_READ(GFX_FLSH_CNTL_GEN6);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Binds an object into the global gtt with the specified cache level. The object
|
||||
* will be accessible to the GPU via commands whose operands reference offsets
|
||||
@ -615,6 +956,30 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
|
||||
POSTING_READ(GFX_FLSH_CNTL_GEN6);
|
||||
}
|
||||
|
||||
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
|
||||
unsigned int first_entry,
|
||||
unsigned int num_entries,
|
||||
bool use_scratch)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vm->dev->dev_private;
|
||||
gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
|
||||
(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
|
||||
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
|
||||
int i;
|
||||
|
||||
if (WARN(num_entries > max_entries,
|
||||
"First entry = %d; Num entries = %d (max=%d)\n",
|
||||
first_entry, num_entries, max_entries))
|
||||
num_entries = max_entries;
|
||||
|
||||
scratch_pte = gen8_pte_encode(vm->scratch.addr,
|
||||
I915_CACHE_LLC,
|
||||
use_scratch);
|
||||
for (i = 0; i < num_entries; i++)
|
||||
gen8_set_pte(&gtt_base[i], scratch_pte);
|
||||
readl(gtt_base);
|
||||
}
|
||||
|
||||
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
|
||||
unsigned int first_entry,
|
||||
unsigned int num_entries,
|
||||
@ -638,7 +1003,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
|
||||
readl(gtt_base);
|
||||
}
|
||||
|
||||
|
||||
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
|
||||
struct sg_table *st,
|
||||
unsigned int pg_start,
|
||||
@ -720,6 +1084,7 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
|
||||
*end -= 4096;
|
||||
}
|
||||
}
|
||||
|
||||
void i915_gem_setup_global_gtt(struct drm_device *dev,
|
||||
unsigned long start,
|
||||
unsigned long mappable_end,
|
||||
@ -816,7 +1181,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
|
||||
|
||||
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
|
||||
drm_mm_takedown(&dev_priv->gtt.base.mm);
|
||||
gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
|
||||
if (INTEL_INFO(dev)->gen < 8)
|
||||
gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
|
||||
}
|
||||
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
|
||||
}
|
||||
@ -866,6 +1232,20 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
|
||||
return snb_gmch_ctl << 20;
|
||||
}
|
||||
|
||||
static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
if (bdw_gmch_ctl > 4) {
WARN_ON(!i915_preliminary_hw_support);
return 4<<20;
}

return bdw_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
@ -873,6 +1253,108 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
return bdw_gmch_ctl << 25; /* 32 MB units */
}

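gen8_get_total_gtt_size() and gen8_get_stolen_size() decode the same SNB_GMCH_CTRL word read in gen8_gmch_probe() below: the GGMS field selects a power-of-two GGTT table size in MiB (currently capped at 4 MiB), and the GMS field counts stolen memory in 32 MiB units, matching the stolen-size decode used elsewhere in this series. A small standalone sketch of the arithmetic; the shift/mask values and the register value are assumptions for illustration, not copied from the headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed BDW field layout, for illustration only. */
#define EXAMPLE_GGMS_SHIFT 6
#define EXAMPLE_GGMS_MASK  0x3
#define EXAMPLE_GMS_SHIFT  8
#define EXAMPLE_GMS_MASK   0xff

int main(void)
{
	uint16_t gmch = 0x0480;	/* made-up register value: GGMS = 2, GMS = 4 */
	unsigned int ggms = (gmch >> EXAMPLE_GGMS_SHIFT) & EXAMPLE_GGMS_MASK;
	unsigned int gms = (gmch >> EXAMPLE_GMS_SHIFT) & EXAMPLE_GMS_MASK;
	uint64_t gtt_bytes = (uint64_t)(ggms ? 1u << ggms : 0) << 20;

	/* Each 8-byte PTE maps a 4 KiB page, as in gen8_gmch_probe(). */
	printf("GGTT table: %llu MiB\n", (unsigned long long)(gtt_bytes >> 20));
	printf("GGTT space: %llu MiB\n",
	       (unsigned long long)(((gtt_bytes / 8) << 12) >> 20));
	printf("stolen:     %u MiB\n", gms * 32);
	return 0;
}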
static int ggtt_probe_common(struct drm_device *dev,
|
||||
size_t gtt_size)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
phys_addr_t gtt_bus_addr;
|
||||
int ret;
|
||||
|
||||
/* For Modern GENs the PTEs and register space are split in the BAR */
|
||||
gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
|
||||
(pci_resource_len(dev->pdev, 0) / 2);
|
||||
|
||||
dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
|
||||
if (!dev_priv->gtt.gsm) {
|
||||
DRM_ERROR("Failed to map the gtt page table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = setup_scratch_page(dev);
|
||||
if (ret) {
|
||||
DRM_ERROR("Scratch setup failed\n");
|
||||
/* iounmap will also get called at remove, but meh */
|
||||
iounmap(dev_priv->gtt.gsm);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
{
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
/* FIXME(BDW): Bspec is completely confused about cache control bits. */
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
uint64_t pat;

pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
* write would work. */
I915_WRITE(GEN8_PRIVATE_PAT, pat);
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}

static int gen8_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;

/* TODO: We're not aware of mappable constraints on gen8 yet */
*mappable_base = pci_resource_start(dev->pdev, 2);
*mappable_end = pci_resource_len(dev->pdev, 2);

if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));

pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

*stolen = gen8_get_stolen_size(snb_gmch_ctl);

gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;

gen8_setup_private_ppat(dev_priv);

ret = ggtt_probe_common(dev, gtt_size);

dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;

return ret;
}

static int gen6_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
@ -880,7 +1362,6 @@ static int gen6_gmch_probe(struct drm_device *dev,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;
@ -900,24 +1381,13 @@ static int gen6_gmch_probe(struct drm_device *dev,
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

*stolen = gen6_get_stolen_size(snb_gmch_ctl);

gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

/* For Modern GENs the PTEs and register space are split in the BAR */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
(pci_resource_len(dev->pdev, 0) / 2);

dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}

ret = setup_scratch_page(dev);
if (ret)
DRM_ERROR("Scratch setup failed\n");
ret = ggtt_probe_common(dev, gtt_size);

dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
@ -971,7 +1441,7 @@ int i915_gem_gtt_init(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 5) {
gtt->gtt_probe = i915_gmch_probe;
gtt->base.cleanup = i915_gmch_remove;
} else {
} else if (INTEL_INFO(dev)->gen < 8) {
gtt->gtt_probe = gen6_gmch_probe;
gtt->base.cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
@ -984,6 +1454,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
gtt->base.pte_encode = ivb_pte_encode;
else
gtt->base.pte_encode = snb_pte_encode;
} else {
dev_priv->gtt.gtt_probe = gen8_gmch_probe;
dev_priv->gtt.base.cleanup = gen6_gmch_remove;
}

ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,

@ -624,6 +624,7 @@ static void i915_gem_record_fences(struct drm_device *dev,

/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 8:
case 7:
case 6:
for (i = 0; i < dev_priv->num_fence_regs; i++)
@ -1044,6 +1045,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
default:
WARN_ONCE(1, "Unsupported platform\n");
case 7:
case 8:
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);

@ -270,6 +270,21 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;

assert_spin_locked(&dev_priv->irq_lock);

if (enable)
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
else
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
* ibx_display_interrupt_update - update SDEIMR
* @dev_priv: driver private
@ -382,6 +397,8 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN8(dev))
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@ -600,35 +617,40 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
|
||||
return I915_READ(reg);
|
||||
}
|
||||
|
||||
static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe)
|
||||
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
|
||||
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
|
||||
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
|
||||
|
||||
static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint32_t status;
|
||||
int reg;
|
||||
|
||||
if (IS_VALLEYVIEW(dev)) {
|
||||
status = pipe == PIPE_A ?
|
||||
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
|
||||
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
|
||||
|
||||
return I915_READ(VLV_ISR) & status;
|
||||
reg = VLV_ISR;
|
||||
} else if (IS_GEN2(dev)) {
|
||||
status = pipe == PIPE_A ?
|
||||
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
|
||||
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
|
||||
|
||||
return I915_READ16(ISR) & status;
|
||||
reg = ISR;
|
||||
} else if (INTEL_INFO(dev)->gen < 5) {
|
||||
status = pipe == PIPE_A ?
|
||||
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
|
||||
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
|
||||
|
||||
return I915_READ(ISR) & status;
|
||||
reg = ISR;
|
||||
} else if (INTEL_INFO(dev)->gen < 7) {
|
||||
status = pipe == PIPE_A ?
|
||||
DE_PIPEA_VBLANK :
|
||||
DE_PIPEB_VBLANK;
|
||||
|
||||
return I915_READ(DEISR) & status;
|
||||
reg = DEISR;
|
||||
} else {
|
||||
switch (pipe) {
|
||||
default:
|
||||
@ -643,12 +665,17 @@ static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe)
|
||||
break;
|
||||
}
|
||||
|
||||
return I915_READ(DEISR) & status;
|
||||
reg = DEISR;
|
||||
}
|
||||
|
||||
if (IS_GEN2(dev))
|
||||
return __raw_i915_read16(dev_priv, reg) & status;
|
||||
else
|
||||
return __raw_i915_read32(dev_priv, reg) & status;
|
||||
}
|
||||
|
||||
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
|
||||
int *vpos, int *hpos)
|
||||
int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
||||
@ -658,6 +685,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
|
||||
int vbl_start, vbl_end, htotal, vtotal;
|
||||
bool in_vbl = true;
|
||||
int ret = 0;
|
||||
unsigned long irqflags;
|
||||
|
||||
if (!intel_crtc->active) {
|
||||
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
|
||||
@ -672,14 +700,27 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
|
||||
|
||||
ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
|
||||
|
||||
/*
|
||||
* Lock uncore.lock, as we will do multiple timing critical raw
|
||||
* register reads, potentially with preemption disabled, so the
|
||||
* following code must not block on uncore.lock.
|
||||
*/
|
||||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
|
||||
|
||||
/* Get optional system timestamp before query. */
|
||||
if (stime)
|
||||
*stime = ktime_get();
|
||||
|
||||
if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
|
||||
/* No obvious pixelcount register. Only query vertical
|
||||
* scanout position from Display scan line register.
|
||||
*/
|
||||
if (IS_GEN2(dev))
|
||||
position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
|
||||
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
|
||||
else
|
||||
position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
|
||||
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
|
||||
|
||||
/*
|
||||
* The scanline counter increments at the leading edge
|
||||
@ -688,7 +729,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
|
||||
* to get a more accurate picture whether we're in vblank
|
||||
* or not.
|
||||
*/
|
||||
in_vbl = intel_pipe_in_vblank(dev, pipe);
|
||||
in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
|
||||
if ((in_vbl && position == vbl_start - 1) ||
|
||||
(!in_vbl && position == vbl_end - 1))
|
||||
position = (position + 1) % vtotal;
|
||||
@ -697,7 +738,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
|
||||
* We can split this into vertical and horizontal
|
||||
* scanout position.
|
||||
*/
|
||||
position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
|
||||
position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
|
||||
|
||||
/* convert to pixel counts */
|
||||
vbl_start *= htotal;
|
||||
@ -705,6 +746,14 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
|
||||
vtotal *= htotal;
|
||||
}
|
||||
|
||||
/* Get optional system timestamp after query. */
|
||||
if (etime)
|
||||
*etime = ktime_get();
|
||||
|
||||
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
in_vbl = position >= vbl_start && position < vbl_end;
|
||||
|
||||
/*
|
||||
@ -1038,7 +1087,7 @@ static void ivybridge_parity_work(struct work_struct *work)
|
||||
parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
|
||||
parity_event[5] = NULL;
|
||||
|
||||
kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
|
||||
kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
|
||||
KOBJ_CHANGE, parity_event);
|
||||
|
||||
DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
|
||||
@ -1117,6 +1166,56 @@ static void snb_gt_irq_handler(struct drm_device *dev,
|
||||
ivybridge_parity_error_irq_handler(dev, gt_iir);
|
||||
}
|
||||
|
||||
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
|
||||
struct drm_i915_private *dev_priv,
|
||||
u32 master_ctl)
|
||||
{
|
||||
u32 rcs, bcs, vcs;
|
||||
uint32_t tmp = 0;
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
|
||||
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
|
||||
tmp = I915_READ(GEN8_GT_IIR(0));
|
||||
if (tmp) {
|
||||
ret = IRQ_HANDLED;
|
||||
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
|
||||
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
|
||||
if (rcs & GT_RENDER_USER_INTERRUPT)
|
||||
notify_ring(dev, &dev_priv->ring[RCS]);
|
||||
if (bcs & GT_RENDER_USER_INTERRUPT)
|
||||
notify_ring(dev, &dev_priv->ring[BCS]);
|
||||
I915_WRITE(GEN8_GT_IIR(0), tmp);
|
||||
} else
|
||||
DRM_ERROR("The master control interrupt lied (GT0)!\n");
|
||||
}
|
||||
|
||||
if (master_ctl & GEN8_GT_VCS1_IRQ) {
|
||||
tmp = I915_READ(GEN8_GT_IIR(1));
|
||||
if (tmp) {
|
||||
ret = IRQ_HANDLED;
|
||||
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
|
||||
if (vcs & GT_RENDER_USER_INTERRUPT)
|
||||
notify_ring(dev, &dev_priv->ring[VCS]);
|
||||
I915_WRITE(GEN8_GT_IIR(1), tmp);
|
||||
} else
|
||||
DRM_ERROR("The master control interrupt lied (GT1)!\n");
|
||||
}
|
||||
|
||||
if (master_ctl & GEN8_GT_VECS_IRQ) {
|
||||
tmp = I915_READ(GEN8_GT_IIR(3));
|
||||
if (tmp) {
|
||||
ret = IRQ_HANDLED;
|
||||
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
|
||||
if (vcs & GT_RENDER_USER_INTERRUPT)
|
||||
notify_ring(dev, &dev_priv->ring[VECS]);
|
||||
I915_WRITE(GEN8_GT_IIR(3), tmp);
|
||||
} else
|
||||
DRM_ERROR("The master control interrupt lied (GT3)!\n");
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define HPD_STORM_DETECT_PERIOD 1000
|
||||
#define HPD_STORM_THRESHOLD 5
|
||||
|
||||
@ -1351,7 +1450,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
|
||||
for_each_pipe(pipe) {
|
||||
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
|
||||
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
|
||||
drm_handle_vblank(dev, pipe);
|
||||
|
||||
if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
|
||||
@ -1690,6 +1789,117 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = arg;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 master_ctl;
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
uint32_t tmp = 0;
|
||||
enum pipe pipe;
|
||||
|
||||
atomic_inc(&dev_priv->irq_received);
|
||||
|
||||
master_ctl = I915_READ(GEN8_MASTER_IRQ);
|
||||
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
|
||||
if (!master_ctl)
|
||||
return IRQ_NONE;
|
||||
|
||||
I915_WRITE(GEN8_MASTER_IRQ, 0);
|
||||
POSTING_READ(GEN8_MASTER_IRQ);
|
||||
|
||||
ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
|
||||
|
||||
if (master_ctl & GEN8_DE_MISC_IRQ) {
|
||||
tmp = I915_READ(GEN8_DE_MISC_IIR);
|
||||
if (tmp & GEN8_DE_MISC_GSE)
|
||||
intel_opregion_asle_intr(dev);
|
||||
else if (tmp)
|
||||
DRM_ERROR("Unexpected DE Misc interrupt\n");
|
||||
else
|
||||
DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
|
||||
|
||||
if (tmp) {
|
||||
I915_WRITE(GEN8_DE_MISC_IIR, tmp);
|
||||
ret = IRQ_HANDLED;
|
||||
}
|
||||
}
|
||||
|
||||
if (master_ctl & GEN8_DE_PORT_IRQ) {
|
||||
tmp = I915_READ(GEN8_DE_PORT_IIR);
|
||||
if (tmp & GEN8_AUX_CHANNEL_A)
|
||||
dp_aux_irq_handler(dev);
|
||||
else if (tmp)
|
||||
DRM_ERROR("Unexpected DE Port interrupt\n");
|
||||
else
|
||||
DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
|
||||
|
||||
if (tmp) {
|
||||
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
|
||||
ret = IRQ_HANDLED;
|
||||
}
|
||||
}
|
||||
|
||||
for_each_pipe(pipe) {
|
||||
uint32_t pipe_iir;
|
||||
|
||||
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
|
||||
continue;
|
||||
|
||||
pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
|
||||
if (pipe_iir & GEN8_PIPE_VBLANK)
|
||||
drm_handle_vblank(dev, pipe);
|
||||
|
||||
if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
|
||||
intel_prepare_page_flip(dev, pipe);
|
||||
intel_finish_page_flip_plane(dev, pipe);
|
||||
}
|
||||
|
||||
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
|
||||
hsw_pipe_crc_irq_handler(dev, pipe);
|
||||
|
||||
if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
|
||||
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
|
||||
false))
|
||||
DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
|
||||
pipe_name(pipe));
|
||||
}
|
||||
|
||||
if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
|
||||
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
|
||||
pipe_name(pipe),
|
||||
pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
|
||||
}
|
||||
|
||||
if (pipe_iir) {
|
||||
ret = IRQ_HANDLED;
|
||||
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
|
||||
} else
|
||||
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
|
||||
}
|
||||
|
||||
if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
|
||||
/*
|
||||
* FIXME(BDW): Assume for now that the new interrupt handling
|
||||
* scheme also closed the SDE interrupt handling race we've seen
|
||||
* on older pch-split platforms. But this needs testing.
|
||||
*/
|
||||
u32 pch_iir = I915_READ(SDEIIR);
|
||||
|
||||
cpt_irq_handler(dev, pch_iir);
|
||||
|
||||
if (pch_iir) {
|
||||
I915_WRITE(SDEIIR, pch_iir);
|
||||
ret = IRQ_HANDLED;
|
||||
}
|
||||
}
|
||||
|
||||
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
|
||||
POSTING_READ(GEN8_MASTER_IRQ);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
|
||||
bool reset_completed)
|
||||
{
|
||||
@ -1737,7 +1947,7 @@ static void i915_error_work_func(struct work_struct *work)
|
||||
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
|
||||
int ret;
|
||||
|
||||
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
|
||||
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
|
||||
|
||||
/*
|
||||
* Note that there's only one work item which does gpu resets, so we
|
||||
@ -1751,7 +1961,7 @@ static void i915_error_work_func(struct work_struct *work)
|
||||
*/
|
||||
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
|
||||
DRM_DEBUG_DRIVER("resetting chip\n");
|
||||
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
|
||||
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
|
||||
reset_event);
|
||||
|
||||
/*
|
||||
@ -1778,7 +1988,7 @@ static void i915_error_work_func(struct work_struct *work)
|
||||
smp_mb__before_atomic_inc();
|
||||
atomic_inc(&dev_priv->gpu_error.reset_counter);
|
||||
|
||||
kobject_uevent_env(&dev->primary->kdev.kobj,
|
||||
kobject_uevent_env(&dev->primary->kdev->kobj,
|
||||
KOBJ_CHANGE, reset_done_event);
|
||||
} else {
|
||||
atomic_set_mask(I915_WEDGED, &error->reset_counter);
|
||||
@ -2043,6 +2253,22 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gen8_enable_vblank(struct drm_device *dev, int pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long irqflags;
|
||||
|
||||
if (!i915_pipe_enabled(dev, pipe))
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
|
||||
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
|
||||
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Called from drm generic code, passed 'crtc' which
|
||||
* we use as a pipe index
|
||||
*/
|
||||
@ -2091,6 +2317,21 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
}
|
||||
|
||||
static void gen8_disable_vblank(struct drm_device *dev, int pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long irqflags;
|
||||
|
||||
if (!i915_pipe_enabled(dev, pipe))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
|
||||
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
|
||||
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
}
|
||||
|
||||
static u32
|
||||
ring_last_seqno(struct intel_ring_buffer *ring)
|
||||
{
|
||||
@ -2425,6 +2666,53 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
|
||||
POSTING_READ(VLV_IER);
|
||||
}
|
||||
|
||||
static void gen8_irq_preinstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int pipe;
|
||||
|
||||
atomic_set(&dev_priv->irq_received, 0);
|
||||
|
||||
I915_WRITE(GEN8_MASTER_IRQ, 0);
|
||||
POSTING_READ(GEN8_MASTER_IRQ);
|
||||
|
||||
/* IIR can theoretically queue up two events. Be paranoid */
|
||||
#define GEN8_IRQ_INIT_NDX(type, which) do { \
|
||||
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
|
||||
POSTING_READ(GEN8_##type##_IMR(which)); \
|
||||
I915_WRITE(GEN8_##type##_IER(which), 0); \
|
||||
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
|
||||
POSTING_READ(GEN8_##type##_IIR(which)); \
|
||||
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
|
||||
} while (0)
|
||||
|
||||
#define GEN8_IRQ_INIT(type) do { \
|
||||
I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
|
||||
POSTING_READ(GEN8_##type##_IMR); \
|
||||
I915_WRITE(GEN8_##type##_IER, 0); \
|
||||
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
|
||||
POSTING_READ(GEN8_##type##_IIR); \
|
||||
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
|
||||
} while (0)
|
||||
|
||||
GEN8_IRQ_INIT_NDX(GT, 0);
|
||||
GEN8_IRQ_INIT_NDX(GT, 1);
|
||||
GEN8_IRQ_INIT_NDX(GT, 2);
|
||||
GEN8_IRQ_INIT_NDX(GT, 3);
|
||||
|
||||
for_each_pipe(pipe) {
|
||||
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
|
||||
}
|
||||
|
||||
GEN8_IRQ_INIT(DE_PORT);
|
||||
GEN8_IRQ_INIT(DE_MISC);
|
||||
GEN8_IRQ_INIT(PCU);
|
||||
#undef GEN8_IRQ_INIT
|
||||
#undef GEN8_IRQ_INIT_NDX
|
||||
|
||||
POSTING_READ(GEN8_PCU_IIR);
|
||||
}
|
||||
|
||||
static void ibx_hpd_irq_setup(struct drm_device *dev)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
@ -2630,6 +2918,117 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* These are interrupts we'll toggle with the ring mask register */
|
||||
uint32_t gt_interrupts[] = {
|
||||
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
|
||||
GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
|
||||
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
|
||||
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
|
||||
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
|
||||
0,
|
||||
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
|
||||
};
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
|
||||
u32 tmp = I915_READ(GEN8_GT_IIR(i));
|
||||
if (tmp)
|
||||
DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
|
||||
i, tmp);
|
||||
I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
|
||||
I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
|
||||
}
|
||||
POSTING_READ(GEN8_GT_IER(0));
|
||||
}
|
||||
|
||||
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
|
||||
GEN8_PIPE_CDCLK_CRC_DONE |
|
||||
GEN8_PIPE_FIFO_UNDERRUN |
|
||||
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
|
||||
int pipe;
|
||||
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
|
||||
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
|
||||
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
|
||||
|
||||
for_each_pipe(pipe) {
|
||||
u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
|
||||
if (tmp)
|
||||
DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
|
||||
pipe, tmp);
|
||||
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
|
||||
I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
|
||||
}
|
||||
POSTING_READ(GEN8_DE_PIPE_ISR(0));
|
||||
|
||||
I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
|
||||
I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
|
||||
POSTING_READ(GEN8_DE_PORT_IER);
|
||||
}
|
||||
|
||||
static int gen8_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
gen8_gt_irq_postinstall(dev_priv);
|
||||
gen8_de_irq_postinstall(dev_priv);
|
||||
|
||||
ibx_irq_postinstall(dev);
|
||||
|
||||
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
|
||||
POSTING_READ(GEN8_MASTER_IRQ);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void gen8_irq_uninstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int pipe;
|
||||
|
||||
if (!dev_priv)
|
||||
return;
|
||||
|
||||
atomic_set(&dev_priv->irq_received, 0);
|
||||
|
||||
I915_WRITE(GEN8_MASTER_IRQ, 0);
|
||||
|
||||
#define GEN8_IRQ_FINI_NDX(type, which) do { \
|
||||
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
|
||||
I915_WRITE(GEN8_##type##_IER(which), 0); \
|
||||
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
|
||||
} while (0)
|
||||
|
||||
#define GEN8_IRQ_FINI(type) do { \
|
||||
I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
|
||||
I915_WRITE(GEN8_##type##_IER, 0); \
|
||||
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
|
||||
} while (0)
|
||||
|
||||
GEN8_IRQ_FINI_NDX(GT, 0);
|
||||
GEN8_IRQ_FINI_NDX(GT, 1);
|
||||
GEN8_IRQ_FINI_NDX(GT, 2);
|
||||
GEN8_IRQ_FINI_NDX(GT, 3);
|
||||
|
||||
for_each_pipe(pipe) {
|
||||
GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
|
||||
}
|
||||
|
||||
GEN8_IRQ_FINI(DE_PORT);
|
||||
GEN8_IRQ_FINI(DE_MISC);
|
||||
GEN8_IRQ_FINI(PCU);
|
||||
#undef GEN8_IRQ_FINI
|
||||
#undef GEN8_IRQ_FINI_NDX
|
||||
|
||||
POSTING_READ(GEN8_PCU_IIR);
|
||||
}
|
||||
|
||||
static void valleyview_irq_uninstall(struct drm_device *dev)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
@ -3409,6 +3808,14 @@ void intel_irq_init(struct drm_device *dev)
|
||||
dev->driver->enable_vblank = valleyview_enable_vblank;
|
||||
dev->driver->disable_vblank = valleyview_disable_vblank;
|
||||
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
|
||||
} else if (IS_GEN8(dev)) {
|
||||
dev->driver->irq_handler = gen8_irq_handler;
|
||||
dev->driver->irq_preinstall = gen8_irq_preinstall;
|
||||
dev->driver->irq_postinstall = gen8_irq_postinstall;
|
||||
dev->driver->irq_uninstall = gen8_irq_uninstall;
|
||||
dev->driver->enable_vblank = gen8_enable_vblank;
|
||||
dev->driver->disable_vblank = gen8_disable_vblank;
|
||||
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
|
||||
} else if (HAS_PCH_SPLIT(dev)) {
|
||||
dev->driver->irq_handler = ironlake_irq_handler;
|
||||
dev->driver->irq_preinstall = ironlake_irq_preinstall;
|
||||
|
@ -110,6 +110,9 @@
|
||||
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
|
||||
#define PP_DIR_DCLV_2G 0xffffffff
|
||||
|
||||
#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4))
|
||||
#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8)
|
||||
|
||||
#define GAM_ECOCHK 0x4090
|
||||
#define ECOCHK_SNB_BIT (1<<10)
|
||||
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
|
||||
@ -247,6 +250,7 @@
|
||||
#define MI_BATCH_NON_SECURE_HSW (1<<13)
|
||||
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
|
||||
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
|
||||
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
|
||||
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
|
||||
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
|
||||
#define MI_SEMAPHORE_UPDATE (1<<21)
|
||||
@ -657,6 +661,9 @@
|
||||
#define ARB_MODE 0x04030
|
||||
#define ARB_MODE_SWIZZLE_SNB (1<<4)
|
||||
#define ARB_MODE_SWIZZLE_IVB (1<<5)
|
||||
#define GAMTARBMODE 0x04a08
|
||||
#define ARB_MODE_BWGTLB_DISABLE (1<<9)
|
||||
#define ARB_MODE_SWIZZLE_BDW (1<<1)
|
||||
#define RENDER_HWS_PGA_GEN7 (0x04080)
|
||||
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
|
||||
#define RING_FAULT_GTTSEL_MASK (1<<11)
|
||||
@ -664,6 +671,7 @@
|
||||
#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
|
||||
#define RING_FAULT_VALID (1<<0)
|
||||
#define DONE_REG 0x40b0
|
||||
#define GEN8_PRIVATE_PAT 0x40e0
|
||||
#define BSD_HWS_PGA_GEN7 (0x04180)
|
||||
#define BLT_HWS_PGA_GEN7 (0x04280)
|
||||
#define VEBOX_HWS_PGA_GEN7 (0x04380)
|
||||
@ -743,6 +751,7 @@
|
||||
#define FPGA_DBG_RM_NOCLAIM (1<<31)
|
||||
|
||||
#define DERRMR 0x44050
|
||||
/* Note that HBLANK events are reserved on bdw+ */
|
||||
#define DERRMR_PIPEA_SCANLINE (1<<0)
|
||||
#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
|
||||
#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
|
||||
@ -776,6 +785,7 @@
|
||||
#define _3D_CHICKEN3 0x02090
|
||||
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
|
||||
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
|
||||
#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1)
|
||||
|
||||
#define MI_MODE 0x0209c
|
||||
# define VS_TIMER_DISPATCH (1 << 6)
|
||||
@ -1822,6 +1832,9 @@
|
||||
* on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
|
||||
*/
|
||||
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
|
||||
/* Same as Haswell, but 72064 bytes now. */
|
||||
#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
|
||||
|
||||
|
||||
#define VLV_CLK_CTL2 0x101104
|
||||
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
|
||||
@ -1952,8 +1965,8 @@
|
||||
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
|
||||
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
|
||||
|
||||
/* HSW eDP PSR registers */
|
||||
#define EDP_PSR_BASE(dev) 0x64800
|
||||
/* HSW+ eDP PSR registers */
|
||||
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
|
||||
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
|
||||
#define EDP_PSR_ENABLE (1<<31)
|
||||
#define EDP_PSR_LINK_DISABLE (0<<27)
|
||||
@ -2397,6 +2410,21 @@

#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)

#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
_VLV_BLC_PWM_CTL2_B)

#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
_VLV_BLC_PWM_CTL_B)

#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
_VLV_BLC_HIST_CTL_B)

/* Backlight control */
#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
||||
@ -3228,6 +3256,18 @@
|
||||
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
|
||||
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
|
||||
|
||||
#define _PIPE_MISC_A 0x70030
|
||||
#define _PIPE_MISC_B 0x71030
|
||||
#define PIPEMISC_DITHER_BPC_MASK (7<<5)
|
||||
#define PIPEMISC_DITHER_8_BPC (0<<5)
|
||||
#define PIPEMISC_DITHER_10_BPC (1<<5)
|
||||
#define PIPEMISC_DITHER_6_BPC (2<<5)
|
||||
#define PIPEMISC_DITHER_12_BPC (3<<5)
|
||||
#define PIPEMISC_DITHER_ENABLE (1<<4)
|
||||
#define PIPEMISC_DITHER_TYPE_MASK (3<<2)
|
||||
#define PIPEMISC_DITHER_TYPE_SP (0<<2)
|
||||
#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
|
||||
|
||||
#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
|
||||
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
|
||||
#define PIPEB_HLINE_INT_EN (1<<28)
|
||||
@ -3358,6 +3398,7 @@
|
||||
#define WM1_LP_LATENCY_MASK (0x7f<<24)
|
||||
#define WM1_LP_FBC_MASK (0xf<<20)
|
||||
#define WM1_LP_FBC_SHIFT 20
|
||||
#define WM1_LP_FBC_SHIFT_BDW 19
|
||||
#define WM1_LP_SR_MASK (0x7ff<<8)
|
||||
#define WM1_LP_SR_SHIFT 8
|
||||
#define WM1_LP_CURSOR_MASK (0xff)
|
||||
@ -3998,6 +4039,71 @@
|
||||
#define GTIIR 0x44018
|
||||
#define GTIER 0x4401c
|
||||
|
||||
#define GEN8_MASTER_IRQ 0x44200
|
||||
#define GEN8_MASTER_IRQ_CONTROL (1<<31)
|
||||
#define GEN8_PCU_IRQ (1<<30)
|
||||
#define GEN8_DE_PCH_IRQ (1<<23)
|
||||
#define GEN8_DE_MISC_IRQ (1<<22)
|
||||
#define GEN8_DE_PORT_IRQ (1<<20)
|
||||
#define GEN8_DE_PIPE_C_IRQ (1<<18)
|
||||
#define GEN8_DE_PIPE_B_IRQ (1<<17)
|
||||
#define GEN8_DE_PIPE_A_IRQ (1<<16)
|
||||
#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe))
|
||||
#define GEN8_GT_VECS_IRQ (1<<6)
|
||||
#define GEN8_GT_VCS2_IRQ (1<<3)
|
||||
#define GEN8_GT_VCS1_IRQ (1<<2)
|
||||
#define GEN8_GT_BCS_IRQ (1<<1)
|
||||
#define GEN8_GT_RCS_IRQ (1<<0)
|
||||
|
||||
#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
|
||||
#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
|
||||
#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
|
||||
#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
|
||||
|
||||
#define GEN8_BCS_IRQ_SHIFT 16
|
||||
#define GEN8_RCS_IRQ_SHIFT 0
|
||||
#define GEN8_VCS2_IRQ_SHIFT 16
|
||||
#define GEN8_VCS1_IRQ_SHIFT 0
|
||||
#define GEN8_VECS_IRQ_SHIFT 0
|
||||
|
||||
#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
|
||||
#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
|
||||
#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe)))
|
||||
#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe)))
|
||||
#define GEN8_PIPE_FIFO_UNDERRUN (1 << 31)
|
||||
#define GEN8_PIPE_CDCLK_CRC_ERROR (1 << 29)
|
||||
#define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28)
|
||||
#define GEN8_PIPE_CURSOR_FAULT (1 << 10)
|
||||
#define GEN8_PIPE_SPRITE_FAULT (1 << 9)
|
||||
#define GEN8_PIPE_PRIMARY_FAULT (1 << 8)
|
||||
#define GEN8_PIPE_SPRITE_FLIP_DONE (1 << 5)
|
||||
#define GEN8_PIPE_FLIP_DONE (1 << 4)
|
||||
#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
|
||||
#define GEN8_PIPE_VSYNC (1 << 1)
|
||||
#define GEN8_PIPE_VBLANK (1 << 0)
|
||||
#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
|
||||
(GEN8_PIPE_CURSOR_FAULT | \
|
||||
GEN8_PIPE_SPRITE_FAULT | \
|
||||
GEN8_PIPE_PRIMARY_FAULT)
|
||||
|
||||
#define GEN8_DE_PORT_ISR 0x44440
|
||||
#define GEN8_DE_PORT_IMR 0x44444
|
||||
#define GEN8_DE_PORT_IIR 0x44448
|
||||
#define GEN8_DE_PORT_IER 0x4444c
|
||||
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
|
||||
#define GEN8_AUX_CHANNEL_A (1 << 0)
|
||||
|
||||
#define GEN8_DE_MISC_ISR 0x44460
|
||||
#define GEN8_DE_MISC_IMR 0x44464
|
||||
#define GEN8_DE_MISC_IIR 0x44468
|
||||
#define GEN8_DE_MISC_IER 0x4446c
|
||||
#define GEN8_DE_MISC_GSE (1 << 27)
|
||||
|
||||
#define GEN8_PCU_ISR 0x444e0
|
||||
#define GEN8_PCU_IMR 0x444e4
|
||||
#define GEN8_PCU_IIR 0x444e8
|
||||
#define GEN8_PCU_IER 0x444ec
|
||||
|
||||
#define ILK_DISPLAY_CHICKEN2 0x42004
|
||||
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
|
||||
#define ILK_ELPIN_409_SELECT (1 << 25)
|
||||
@ -4023,8 +4129,14 @@
|
||||
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
|
||||
|
||||
#define CHICKEN_PAR1_1 0x42080
|
||||
#define DPA_MASK_VBLANK_SRD (1 << 15)
|
||||
#define FORCE_ARB_IDLE_PLANES (1 << 14)
|
||||
|
||||
#define _CHICKEN_PIPESL_1_A 0x420b0
|
||||
#define _CHICKEN_PIPESL_1_B 0x420b4
|
||||
#define DPRS_MASK_VBLANK_SRD (1 << 0)
|
||||
#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
|
||||
|
||||
#define DISP_ARB_CTL 0x45000
|
||||
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
|
||||
#define DISP_FBC_WM_DIS (1<<15)
|
||||
@ -4035,6 +4147,8 @@
|
||||
/* GEN7 chicken */
|
||||
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
|
||||
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
|
||||
#define COMMON_SLICE_CHICKEN2 0x7014
|
||||
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
|
||||
|
||||
#define GEN7_L3CNTLREG1 0xB01C
|
||||
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
|
||||
@ -4863,6 +4977,7 @@
|
||||
#define GEN6_PCODE_WRITE_D_COMP 0x11
|
||||
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
|
||||
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
|
||||
#define DISPLAY_IPS_CONTROL 0x19
|
||||
#define GEN6_PCODE_DATA 0x138128
|
||||
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
|
||||
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
|
||||
@ -4900,6 +5015,7 @@
|
||||
#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
|
||||
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
|
||||
#define GEN7_MAX_PS_THREAD_DEP (8<<12)
|
||||
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
|
||||
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
|
||||
|
||||
#define GEN7_ROW_CHICKEN2 0xe4f4
|
||||
@ -4909,6 +5025,10 @@
|
||||
#define HSW_ROW_CHICKEN3 0xe49c
|
||||
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
|
||||
|
||||
#define HALF_SLICE_CHICKEN3 0xe184
|
||||
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
|
||||
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
|
||||
|
||||
#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
|
||||
#define INTEL_AUDIO_DEVCL 0x808629FB
|
||||
#define INTEL_AUDIO_DEVBLC 0x80862801
|
||||
@ -4950,6 +5070,18 @@
|
||||
CPT_AUD_CNTL_ST_B)
|
||||
#define CPT_AUD_CNTRL_ST2 0xE50C0
|
||||
|
||||
#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
|
||||
#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
|
||||
#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
|
||||
VLV_HDMIW_HDMIEDID_A, \
|
||||
VLV_HDMIW_HDMIEDID_B)
|
||||
#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
|
||||
#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
|
||||
#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
|
||||
VLV_AUD_CNTL_ST_A, \
|
||||
VLV_AUD_CNTL_ST_B)
|
||||
#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
|
||||
|
||||
/* These are the 4 32-bit write offset registers for each stream
|
||||
* output buffer. It determines the offset from the
|
||||
* 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
|
||||
@ -4966,6 +5098,12 @@
|
||||
#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
|
||||
CPT_AUD_CONFIG_A, \
|
||||
CPT_AUD_CONFIG_B)
|
||||
#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
|
||||
#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
|
||||
#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
|
||||
VLV_AUD_CONFIG_A, \
|
||||
VLV_AUD_CONFIG_B)
|
||||
|
||||
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
|
||||
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
|
||||
#define AUD_CONFIG_UPPER_N_SHIFT 20
|
||||
@ -5108,6 +5246,7 @@
|
||||
#define DDI_BUF_CTL_B 0x64100
|
||||
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
|
||||
#define DDI_BUF_CTL_ENABLE (1<<31)
|
||||
/* Haswell */
|
||||
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
|
||||
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
|
||||
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
|
||||
@ -5117,6 +5256,16 @@
|
||||
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
|
||||
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
|
||||
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
|
||||
/* Broadwell */
|
||||
#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
|
||||
#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
|
||||
#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
|
||||
#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
|
||||
#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
|
||||
#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
|
||||
#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
|
||||
#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
|
||||
#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
|
||||
#define DDI_BUF_EMP_MASK (0xf<<24)
|
||||
#define DDI_BUF_PORT_REVERSAL (1<<16)
|
||||
#define DDI_BUF_IS_IDLE (1<<7)
|
||||
@ -5226,6 +5375,9 @@
|
||||
#define LCPLL_PLL_LOCK (1<<30)
|
||||
#define LCPLL_CLK_FREQ_MASK (3<<26)
|
||||
#define LCPLL_CLK_FREQ_450 (0<<26)
|
||||
#define LCPLL_CLK_FREQ_54O_BDW (1<<26)
|
||||
#define LCPLL_CLK_FREQ_337_5_BDW (2<<26)
|
||||
#define LCPLL_CLK_FREQ_675_BDW (3<<26)
|
||||
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
|
||||
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
|
||||
#define LCPLL_POWER_DOWN_ALLOW (1<<22)
|
||||
|
@ -192,7 +192,6 @@ static void i915_restore_vga(struct drm_device *dev)
|
||||
static void i915_save_display(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
/* Display arbitration control */
|
||||
if (INTEL_INFO(dev)->gen <= 4)
|
||||
@ -203,30 +202,27 @@ static void i915_save_display(struct drm_device *dev)
|
||||
if (!drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
i915_save_display_reg(dev);
|
||||
|
||||
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
|
||||
|
||||
/* LVDS state */
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
|
||||
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
|
||||
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
|
||||
dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
|
||||
dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
|
||||
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
|
||||
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
|
||||
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
|
||||
|
||||
dev_priv->regfile.saveBLC_HIST_CTL =
|
||||
I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
|
||||
dev_priv->regfile.saveBLC_HIST_CTL_B =
|
||||
I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
|
||||
} else {
|
||||
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
|
||||
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
|
||||
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
|
||||
dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
|
||||
if (INTEL_INFO(dev)->gen >= 4)
|
||||
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
|
||||
if (IS_MOBILE(dev) && !IS_I830(dev))
|
||||
dev_priv->regfile.saveLVDS = I915_READ(LVDS);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
|
||||
|
||||
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
|
||||
dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
|
||||
|
||||
@ -262,7 +258,6 @@ static void i915_restore_display(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 mask = 0xffffffff;
|
||||
unsigned long flags;
|
||||
|
||||
/* Display arbitration */
|
||||
if (INTEL_INFO(dev)->gen <= 4)
|
||||
@ -271,12 +266,6 @@ static void i915_restore_display(struct drm_device *dev)
|
||||
if (!drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
i915_restore_display_reg(dev);
|
||||
|
||||
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
|
||||
|
||||
/* LVDS state */
|
||||
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
|
||||
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
mask = ~LVDS_PORT_EN;
|
||||
|
||||
@ -289,22 +278,19 @@ static void i915_restore_display(struct drm_device *dev)
|
||||
I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
|
||||
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
|
||||
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
|
||||
/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
|
||||
* otherwise we get blank eDP screen after S3 on some machines
|
||||
*/
|
||||
I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
|
||||
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
|
||||
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
|
||||
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
|
||||
I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
|
||||
I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
|
||||
I915_WRITE(RSTDBYCTL,
|
||||
dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
|
||||
dev_priv->regfile.saveBLC_HIST_CTL);
|
||||
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
|
||||
dev_priv->regfile.saveBLC_HIST_CTL);
|
||||
} else {
|
||||
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
|
||||
I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
|
||||
I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
|
||||
I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
|
||||
I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
|
||||
@ -312,8 +298,6 @@ static void i915_restore_display(struct drm_device *dev)
|
||||
I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
|
||||
|
||||
/* only restore FBC info on the platform that supports FBC*/
|
||||
intel_disable_fbc(dev);
|
||||
if (I915_HAS_FBC(dev)) {
|
||||
|
@ -32,6 +32,8 @@
|
||||
#include "intel_drv.h"
|
||||
#include "i915_drv.h"
|
||||
|
||||
#define dev_to_drm_minor(d) dev_get_drvdata((d))
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static u32 calc_residency(struct drm_device *dev, const u32 reg)
|
||||
{
|
||||
@ -66,14 +68,14 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
|
||||
static ssize_t
|
||||
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *dminor = dev_to_drm_minor(kdev);
|
||||
return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *dminor = dev_get_drvdata(kdev);
|
||||
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
|
||||
}
|
||||
@ -81,7 +83,7 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
static ssize_t
|
||||
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *dminor = dev_to_drm_minor(kdev);
|
||||
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
|
||||
if (IS_VALLEYVIEW(dminor->dev))
|
||||
rc6p_residency = 0;
|
||||
@ -91,7 +93,7 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
static ssize_t
|
||||
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *dminor = dev_to_drm_minor(kdev);
|
||||
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
|
||||
if (IS_VALLEYVIEW(dminor->dev))
|
||||
rc6pp_residency = 0;
|
||||
@ -137,7 +139,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
|
||||
loff_t offset, size_t count)
|
||||
{
|
||||
struct device *dev = container_of(kobj, struct device, kobj);
|
||||
struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
|
||||
struct drm_minor *dminor = dev_to_drm_minor(dev);
|
||||
struct drm_device *drm_dev = dminor->dev;
|
||||
struct drm_i915_private *dev_priv = drm_dev->dev_private;
|
||||
int slice = (int)(uintptr_t)attr->private;
|
||||
@ -173,7 +175,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
|
||||
loff_t offset, size_t count)
|
||||
{
|
||||
struct device *dev = container_of(kobj, struct device, kobj);
|
||||
struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
|
||||
struct drm_minor *dminor = dev_to_drm_minor(dev);
|
||||
struct drm_device *drm_dev = dminor->dev;
|
||||
struct drm_i915_private *dev_priv = drm_dev->dev_private;
|
||||
struct i915_hw_context *ctx;
|
||||
@ -246,7 +248,7 @@ static struct bin_attribute dpf_attrs_1 = {
|
||||
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
@ -269,7 +271,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
|
||||
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
@ -279,7 +281,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
|
||||
|
||||
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
@ -300,7 +302,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
|
||||
@ -355,7 +357,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
|
||||
|
||||
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
@ -376,7 +378,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 val, rp_state_cap, hw_max, hw_min;
|
||||
@ -437,7 +439,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
|
||||
/* For now we have a static number of RP states */
|
||||
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 val, rp_state_cap;
|
||||
@ -485,7 +487,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
|
||||
{
|
||||
|
||||
struct device *kdev = container_of(kobj, struct device, kobj);
|
||||
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct i915_error_state_file_priv error_priv;
|
||||
struct drm_i915_error_state_buf error_str;
|
||||
@ -520,7 +522,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
|
||||
loff_t off, size_t count)
|
||||
{
|
||||
struct device *kdev = container_of(kobj, struct device, kobj);
|
||||
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
int ret;
|
||||
|
||||
@ -550,19 +552,19 @@ void i915_setup_sysfs(struct drm_device *dev)
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
ret = sysfs_merge_group(&dev->primary->kdev.kobj,
|
||||
ret = sysfs_merge_group(&dev->primary->kdev->kobj,
|
||||
&rc6_attr_group);
|
||||
if (ret)
|
||||
DRM_ERROR("RC6 residency sysfs setup failed\n");
|
||||
}
|
||||
#endif
|
||||
if (HAS_L3_DPF(dev)) {
|
||||
ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
|
||||
ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
|
||||
if (ret)
|
||||
DRM_ERROR("l3 parity sysfs setup failed\n");
|
||||
|
||||
if (NUM_L3_SLICES(dev) > 1) {
|
||||
ret = device_create_bin_file(&dev->primary->kdev,
|
||||
ret = device_create_bin_file(dev->primary->kdev,
|
||||
&dpf_attrs_1);
|
||||
if (ret)
|
||||
DRM_ERROR("l3 parity slice 1 setup failed\n");
|
||||
@ -571,13 +573,13 @@ void i915_setup_sysfs(struct drm_device *dev)
|
||||
|
||||
ret = 0;
|
||||
if (IS_VALLEYVIEW(dev))
|
||||
ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
|
||||
ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
|
||||
else if (INTEL_INFO(dev)->gen >= 6)
|
||||
ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
|
||||
ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
|
||||
if (ret)
|
||||
DRM_ERROR("RPS sysfs setup failed\n");
|
||||
|
||||
ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
|
||||
ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
|
||||
&error_state_attr);
|
||||
if (ret)
|
||||
DRM_ERROR("error_state sysfs setup failed\n");
|
||||
@ -585,14 +587,14 @@ void i915_setup_sysfs(struct drm_device *dev)
|
||||
|
||||
void i915_teardown_sysfs(struct drm_device *dev)
|
||||
{
|
||||
sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
|
||||
sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
|
||||
if (IS_VALLEYVIEW(dev))
|
||||
sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
|
||||
sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
|
||||
else
|
||||
sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
|
||||
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs_1);
|
||||
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
|
||||
sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
|
||||
device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
|
||||
device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
|
||||
#ifdef CONFIG_PM
|
||||
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
|
||||
sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
|
||||
#endif
|
||||
}
|
||||
|
@@ -270,6 +270,18 @@ void i915_save_display_reg(struct drm_device *dev)
}
/* FIXME: regfile.save TV & SDVO state */

/* Backlight */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
} else {
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
}

return;
}

@@ -280,6 +292,21 @@ void i915_restore_display_reg(struct drm_device *dev)
int dpll_b_reg, fpb0_reg, fpb1_reg;
int i;

/* Backlight */
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
* otherwise we get blank eDP screen after S3 on some machines
*/
I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
} else {
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
}

/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
@@ -624,11 +624,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,

aux_channel = child->raw[25];

is_dvi = child->common.device_type & (1 << 4);
is_dp = child->common.device_type & (1 << 2);
is_crt = child->common.device_type & (1 << 0);
is_hdmi = is_dvi && (child->common.device_type & (1 << 11)) == 0;
is_edp = is_dp && (child->common.device_type & (1 << 12));
is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);

info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
@@ -638,6 +638,40 @@ int intel_parse_bios(struct drm_device *dev);
#define DEVICE_TYPE_DP 0x68C6
#define DEVICE_TYPE_eDP 0x78C6

#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)

/*
* Bits we care about when checking for DEVICE_TYPE_eDP
* Depending on the system, the other bits may or may not
* be set for eDP outputs.
*/
#define DEVICE_TYPE_eDP_BITS \
(DEVICE_TYPE_INTERNAL_CONNECTOR | \
DEVICE_TYPE_NOT_HDMI_OUTPUT | \
DEVICE_TYPE_MIPI_OUTPUT | \
DEVICE_TYPE_COMPOSITE_OUTPUT | \
DEVICE_TYPE_DUAL_CHANNEL | \
DEVICE_TYPE_LVDS_SINGALING | \
DEVICE_TYPE_TMDS_DVI_SIGNALING | \
DEVICE_TYPE_VIDEO_SIGNALING | \
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
DEVICE_TYPE_DIGITAL_OUTPUT | \
DEVICE_TYPE_ANALOG_OUTPUT)

/* define the DVO port for HDMI output type */
#define DVO_B 1
#define DVO_C 2
@@ -822,16 +822,15 @@ void intel_crt_init(struct drm_device *dev)
crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
if (IS_HASWELL(dev))
crt->base.get_config = hsw_crt_get_config;
else
crt->base.get_config = intel_crt_get_config;
if (I915_HAS_HOTPLUG(dev))
crt->base.hpd_pin = HPD_CRT;
if (HAS_DDI(dev))
if (HAS_DDI(dev)) {
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
else
} else {
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
}
intel_connector->get_hw_state = intel_connector_get_hw_state;

drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
@@ -72,6 +72,45 @@ static const u32 hsw_ddi_translations_hdmi[] = {
0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
};

static const u32 bdw_ddi_translations_edp[] = {
0x00FFFFFF, 0x00000012, /* DP parameters */
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
0x00FFFFFF, 0x00020011,
0x00DB6FFF, 0x0005000F,
0x00BEEFFF, 0x000A000C,
0x00FFFFFF, 0x0005000F,
0x00DB6FFF, 0x000A000C,
0x00FFFFFF, 0x000A000C,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
};

static const u32 bdw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0007000E, /* DP parameters */
0x00D75FFF, 0x000E000A,
0x00BEFFFF, 0x00140006,
0x00FFFFFF, 0x000E000A,
0x00D75FFF, 0x00180004,
0x80CB2FFF, 0x001B0002,
0x00F7DFFF, 0x00180004,
0x80D75FFF, 0x001B0002,
0x80FFFFFF, 0x001B0002,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
};

static const u32 bdw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x0001000E, /* FDI parameters */
0x00D75FFF, 0x0004000A,
0x00C30FFF, 0x00070006,
0x00AAAFFF, 0x000C0000,
0x00FFFFFF, 0x0004000A,
0x00D75FFF, 0x00090004,
0x00C30FFF, 0x000C0000,
0x00FFFFFF, 0x00070006,
0x00D75FFF, 0x000C0000,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
};

enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
@@ -92,8 +131,9 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
}
}

/* On Haswell, DDI port buffers must be programmed with correct values
* in advance. The buffer values are different for FDI and DP modes,
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. The buffer values are different for FDI and DP modes,
* but the HDMI/DVI fields are shared among those. So we program the DDI
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
@@ -103,10 +143,47 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i;
const u32 *ddi_translations = (port == PORT_E) ?
hsw_ddi_translations_fdi :
hsw_ddi_translations_dp;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
const u32 *ddi_translations_fdi;
const u32 *ddi_translations_dp;
const u32 *ddi_translations_edp;
const u32 *ddi_translations;

if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
} else if (IS_HASWELL(dev)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
}

switch (port) {
case PORT_A:
ddi_translations = ddi_translations_edp;
break;
case PORT_B:
case PORT_C:
ddi_translations = ddi_translations_dp;
break;
case PORT_D:
if (intel_dpd_is_edp(dev))
ddi_translations = ddi_translations_edp;
else
ddi_translations = ddi_translations_dp;
break;
case PORT_E:
ddi_translations = ddi_translations_fdi;
break;
default:
BUG();
}

for (i = 0, reg = DDI_BUF_TRANS(port);
i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
@@ -756,7 +833,8 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -792,10 +870,11 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) {
case PIPE_A:
/* Can only use the always-on power well for eDP when
* not using the panel fitter, and when not using motion
* blur mitigation (which we don't support). */
if (intel_crtc->config.pch_pfit.enabled)
/* On Haswell, can only use the always-on power well for
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@ -1156,18 +1235,29 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
|
||||
|
||||
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
uint32_t lcpll = I915_READ(LCPLL_CTL);
|
||||
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
|
||||
|
||||
if (lcpll & LCPLL_CD_SOURCE_FCLK)
|
||||
if (lcpll & LCPLL_CD_SOURCE_FCLK) {
|
||||
return 800000;
|
||||
else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
|
||||
} else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) {
|
||||
return 450000;
|
||||
else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
|
||||
} else if (freq == LCPLL_CLK_FREQ_450) {
|
||||
return 450000;
|
||||
else if (IS_ULT(dev_priv->dev))
|
||||
return 337500;
|
||||
else
|
||||
return 540000;
|
||||
} else if (IS_HASWELL(dev)) {
|
||||
if (IS_ULT(dev))
|
||||
return 337500;
|
||||
else
|
||||
return 540000;
|
||||
} else {
|
||||
if (freq == LCPLL_CLK_FREQ_54O_BDW)
|
||||
return 540000;
|
||||
else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
|
||||
return 337500;
|
||||
else
|
||||
return 675000;
|
||||
}
|
||||
}
|
||||
|
||||
void intel_ddi_pll_init(struct drm_device *dev)
|
||||
|
@ -2164,7 +2164,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
|
||||
else
|
||||
dspcntr &= ~DISPPLANE_TILED;
|
||||
|
||||
if (IS_HASWELL(dev))
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
|
||||
dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
|
||||
else
|
||||
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
|
||||
@ -2184,7 +2184,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
|
||||
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
|
||||
I915_MODIFY_DISPBASE(DSPSURF(plane),
|
||||
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
|
||||
if (IS_HASWELL(dev)) {
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
|
||||
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
|
||||
} else {
|
||||
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
|
||||
@ -3401,15 +3401,26 @@ void hsw_enable_ips(struct intel_crtc *crtc)
|
||||
* only after intel_enable_plane. And intel_enable_plane already waits
|
||||
* for a vblank, so all we need to do here is to enable the IPS bit. */
|
||||
assert_plane_enabled(dev_priv, crtc->plane);
|
||||
I915_WRITE(IPS_CTL, IPS_ENABLE);
|
||||
|
||||
/* The bit only becomes 1 in the next vblank, so this wait here is
|
||||
* essentially intel_wait_for_vblank. If we don't have this and don't
|
||||
* wait for vblanks until the end of crtc_enable, then the HW state
|
||||
* readout code will complain that the expected IPS_CTL value is not the
|
||||
* one we read. */
|
||||
if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
|
||||
DRM_ERROR("Timed out waiting for IPS enable\n");
|
||||
if (IS_BROADWELL(crtc->base.dev)) {
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
/* Quoting Art Runyan: "its not safe to expect any particular
|
||||
* value in IPS_CTL bit 31 after enabling IPS through the
|
||||
* mailbox." Therefore we need to defer waiting on the state
|
||||
* change.
|
||||
* TODO: need to fix this for state checker
|
||||
*/
|
||||
} else {
|
||||
I915_WRITE(IPS_CTL, IPS_ENABLE);
|
||||
/* The bit only becomes 1 in the next vblank, so this wait here
|
||||
* is essentially intel_wait_for_vblank. If we don't have this
|
||||
* and don't wait for vblanks until the end of crtc_enable, then
|
||||
* the HW state readout code will complain that the expected
|
||||
* IPS_CTL value is not the one we read. */
|
||||
if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
|
||||
DRM_ERROR("Timed out waiting for IPS enable\n");
|
||||
}
|
||||
}
|
||||
|
||||
void hsw_disable_ips(struct intel_crtc *crtc)
|
||||
@ -3421,7 +3432,12 @@ void hsw_disable_ips(struct intel_crtc *crtc)
|
||||
return;
|
||||
|
||||
assert_plane_enabled(dev_priv, crtc->plane);
|
||||
I915_WRITE(IPS_CTL, 0);
|
||||
if (IS_BROADWELL(crtc->base.dev)) {
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
} else
|
||||
I915_WRITE(IPS_CTL, 0);
|
||||
POSTING_READ(IPS_CTL);
|
||||
|
||||
/* We need to wait for a vblank before we can disable the plane. */
|
||||
@ -4420,7 +4436,7 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
|
||||
return false;
|
||||
}
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
|
||||
if (pipe_config->fdi_lanes > 2) {
|
||||
DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
|
||||
pipe_config->fdi_lanes);
|
||||
@ -5994,14 +6010,16 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
|
||||
|
||||
static void haswell_set_pipeconf(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
|
||||
uint32_t val;
|
||||
|
||||
val = 0;
|
||||
|
||||
if (intel_crtc->config.dither)
|
||||
if (IS_HASWELL(dev) && intel_crtc->config.dither)
|
||||
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
|
||||
|
||||
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
|
||||
@ -6014,6 +6032,33 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
|
||||
|
||||
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
|
||||
POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
|
||||
|
||||
if (IS_BROADWELL(dev)) {
|
||||
val = 0;
|
||||
|
||||
switch (intel_crtc->config.pipe_bpp) {
|
||||
case 18:
|
||||
val |= PIPEMISC_DITHER_6_BPC;
|
||||
break;
|
||||
case 24:
|
||||
val |= PIPEMISC_DITHER_8_BPC;
|
||||
break;
|
||||
case 30:
|
||||
val |= PIPEMISC_DITHER_10_BPC;
|
||||
break;
|
||||
case 36:
|
||||
val |= PIPEMISC_DITHER_12_BPC;
|
||||
break;
|
||||
default:
|
||||
/* Case prevented by pipe_config_set_bpp. */
|
||||
BUG();
|
||||
}
|
||||
|
||||
if (intel_crtc->config.dither)
|
||||
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
|
||||
|
||||
I915_WRITE(PIPEMISC(pipe), val);
|
||||
}
|
||||
}
|
||||
|
||||
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
|
||||
@ -7165,6 +7210,11 @@ static void ironlake_write_eld(struct drm_connector *connector,
|
||||
aud_config = IBX_AUD_CFG(pipe);
|
||||
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
|
||||
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
|
||||
} else if (IS_VALLEYVIEW(connector->dev)) {
|
||||
hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
|
||||
aud_config = VLV_AUD_CFG(pipe);
|
||||
aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
|
||||
aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
|
||||
} else {
|
||||
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
|
||||
aud_config = CPT_AUD_CFG(pipe);
|
||||
@ -7174,8 +7224,19 @@ static void ironlake_write_eld(struct drm_connector *connector,
|
||||
|
||||
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
|
||||
|
||||
i = I915_READ(aud_cntl_st);
|
||||
i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
|
||||
if (IS_VALLEYVIEW(connector->dev)) {
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct intel_digital_port *intel_dig_port;
|
||||
|
||||
intel_encoder = intel_attached_encoder(connector);
|
||||
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
|
||||
i = intel_dig_port->port;
|
||||
} else {
|
||||
i = I915_READ(aud_cntl_st);
|
||||
i = (i >> 29) & DIP_PORT_SEL_MASK;
|
||||
/* DIP_Port_Select, 0x1 = PortB */
|
||||
}
|
||||
|
||||
if (!i) {
|
||||
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
|
||||
/* operate blindly on all ports */
|
||||
@ -7319,7 +7380,7 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
|
||||
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
|
||||
cntl |= CURSOR_MODE_DISABLE;
|
||||
}
|
||||
if (IS_HASWELL(dev)) {
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
|
||||
cntl |= CURSOR_PIPE_CSC_ENABLE;
|
||||
cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
|
||||
}
|
||||
@ -7375,7 +7436,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
|
||||
if (!visible && !intel_crtc->cursor_visible)
|
||||
return;
|
||||
|
||||
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
|
||||
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
|
||||
I915_WRITE(CURPOS_IVB(pipe), pos);
|
||||
ivb_update_cursor(crtc, base);
|
||||
} else {
|
||||
@ -10049,6 +10110,18 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
|
||||
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
|
||||
}
|
||||
|
||||
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_encoder *encoder = connector->base.encoder;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
|
||||
|
||||
if (!encoder)
|
||||
return INVALID_PIPE;
|
||||
|
||||
return to_intel_crtc(encoder->crtc)->pipe;
|
||||
}
|
||||
|
||||
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
{
|
||||
@ -10064,7 +10137,7 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
|
||||
|
||||
if (!drmmode_obj) {
|
||||
DRM_ERROR("no such CRTC id\n");
|
||||
return -EINVAL;
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
|
||||
@ -10500,7 +10573,7 @@ static void intel_init_display(struct drm_device *dev)
|
||||
dev_priv->display.write_eld = ironlake_write_eld;
|
||||
dev_priv->display.modeset_global_resources =
|
||||
ivb_modeset_global_resources;
|
||||
} else if (IS_HASWELL(dev)) {
|
||||
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
|
||||
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
|
||||
dev_priv->display.write_eld = haswell_write_eld;
|
||||
dev_priv->display.modeset_global_resources =
|
||||
@ -10511,6 +10584,7 @@ static void intel_init_display(struct drm_device *dev)
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
dev_priv->display.modeset_global_resources =
|
||||
valleyview_modeset_global_resources;
|
||||
dev_priv->display.write_eld = ironlake_write_eld;
|
||||
}
|
||||
|
||||
/* Default just returns -ENODEV to indicate unsupported */
|
||||
@ -10534,9 +10608,12 @@ static void intel_init_display(struct drm_device *dev)
|
||||
dev_priv->display.queue_flip = intel_gen6_queue_flip;
|
||||
break;
|
||||
case 7:
|
||||
case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
|
||||
dev_priv->display.queue_flip = intel_gen7_queue_flip;
|
||||
break;
|
||||
}
|
||||
|
||||
intel_panel_init_backlight_funcs(dev);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -10573,17 +10650,6 @@ static void quirk_invert_brightness(struct drm_device *dev)
|
||||
DRM_INFO("applying inverted panel brightness quirk\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* Some machines (Dell XPS13) suffer broken backlight controls if
|
||||
* BLM_PCH_PWM_ENABLE is set.
|
||||
*/
|
||||
static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
|
||||
DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
|
||||
}
|
||||
|
||||
struct intel_quirk {
|
||||
int device;
|
||||
int subsystem_vendor;
|
||||
@ -10643,11 +10709,6 @@ static struct intel_quirk intel_quirks[] = {
|
||||
* seem to use inverted backlight PWM.
|
||||
*/
|
||||
{ 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
|
||||
|
||||
/* Dell XPS13 HD Sandy Bridge */
|
||||
{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
|
||||
/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
|
||||
{ 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
|
||||
};
|
||||
|
||||
static void intel_init_quirks(struct drm_device *dev)
|
||||
@ -11189,12 +11250,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
|
||||
/* flush any delayed tasks or pending work */
|
||||
flush_scheduled_work();
|
||||
|
||||
/* destroy backlight, if any, before the connectors */
|
||||
intel_panel_destroy_backlight(dev);
|
||||
|
||||
/* destroy the sysfs files before encoders/connectors */
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
|
||||
/* destroy the backlight and sysfs files before encoders/connectors */
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
intel_panel_destroy_backlight(connector);
|
||||
drm_sysfs_connector_remove(connector);
|
||||
}
|
||||
|
||||
drm_mode_config_cleanup(dev);
|
||||
|
||||
|
@ -405,6 +405,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
|
||||
uint32_t status;
|
||||
int try, precharge, clock = 0;
|
||||
bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
|
||||
uint32_t timeout;
|
||||
|
||||
/* dp aux is extremely sensitive to irq latency, hence request the
|
||||
* lowest possible wakeup latency and so prevent the cpu from going into
|
||||
@ -419,6 +420,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
|
||||
else
|
||||
precharge = 5;
|
||||
|
||||
if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
|
||||
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
|
||||
else
|
||||
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
|
||||
|
||||
intel_aux_display_runtime_get(dev_priv);
|
||||
|
||||
/* Try to wait for any previous AUX channel activity */
|
||||
@ -454,7 +460,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
|
||||
I915_WRITE(ch_ctl,
|
||||
DP_AUX_CH_CTL_SEND_BUSY |
|
||||
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
|
||||
DP_AUX_CH_CTL_TIME_OUT_400us |
|
||||
timeout |
|
||||
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
|
||||
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
|
||||
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
|
||||
@ -747,7 +753,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
|
||||
strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
|
||||
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
|
||||
intel_dp->adapter.algo_data = &intel_dp->algo;
|
||||
intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
|
||||
intel_dp->adapter.dev.parent = intel_connector->base.kdev;
|
||||
|
||||
ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
|
||||
return ret;
|
||||
@ -1249,7 +1255,6 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
|
||||
u32 pp;
|
||||
u32 pp_ctrl_reg;
|
||||
|
||||
@ -1272,7 +1277,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
|
||||
I915_WRITE(pp_ctrl_reg, pp);
|
||||
POSTING_READ(pp_ctrl_reg);
|
||||
|
||||
intel_panel_enable_backlight(dev, pipe);
|
||||
intel_panel_enable_backlight(intel_dp->attached_connector);
|
||||
}
|
||||
|
||||
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
|
||||
@ -1285,7 +1290,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
|
||||
if (!is_edp(intel_dp))
|
||||
return;
|
||||
|
||||
intel_panel_disable_backlight(dev);
|
||||
intel_panel_disable_backlight(intel_dp->attached_connector);
|
||||
|
||||
DRM_DEBUG_KMS("\n");
|
||||
pp = ironlake_get_pp_control(intel_dp);
|
||||
@ -1611,6 +1616,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
|
||||
uint32_t max_sleep_time = 0x1f;
|
||||
uint32_t idle_frames = 1;
|
||||
uint32_t val = 0x0;
|
||||
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
|
||||
|
||||
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
|
||||
val |= EDP_PSR_LINK_STANDBY;
|
||||
@ -1621,7 +1627,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
|
||||
val |= EDP_PSR_LINK_DISABLE;
|
||||
|
||||
I915_WRITE(EDP_PSR_CTL(dev), val |
|
||||
EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
|
||||
IS_BROADWELL(dev) ? 0 : link_entry_time |
|
||||
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
|
||||
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
|
||||
EDP_PSR_ENABLE);
|
||||
@ -1958,7 +1964,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
|
||||
struct drm_device *dev = intel_dp_to_dev(intel_dp);
|
||||
enum port port = dp_to_dig_port(intel_dp)->port;
|
||||
|
||||
if (IS_VALLEYVIEW(dev))
|
||||
if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
|
||||
return DP_TRAIN_VOLTAGE_SWING_1200;
|
||||
else if (IS_GEN7(dev) && port == PORT_A)
|
||||
return DP_TRAIN_VOLTAGE_SWING_800;
|
||||
@ -1974,7 +1980,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
|
||||
struct drm_device *dev = intel_dp_to_dev(intel_dp);
|
||||
enum port port = dp_to_dig_port(intel_dp)->port;
|
||||
|
||||
if (HAS_DDI(dev)) {
|
||||
if (IS_BROADWELL(dev)) {
|
||||
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
|
||||
case DP_TRAIN_VOLTAGE_SWING_400:
|
||||
case DP_TRAIN_VOLTAGE_SWING_600:
|
||||
return DP_TRAIN_PRE_EMPHASIS_6;
|
||||
case DP_TRAIN_VOLTAGE_SWING_800:
|
||||
return DP_TRAIN_PRE_EMPHASIS_3_5;
|
||||
case DP_TRAIN_VOLTAGE_SWING_1200:
|
||||
default:
|
||||
return DP_TRAIN_PRE_EMPHASIS_0;
|
||||
}
|
||||
} else if (IS_HASWELL(dev)) {
|
||||
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
|
||||
case DP_TRAIN_VOLTAGE_SWING_400:
|
||||
return DP_TRAIN_PRE_EMPHASIS_9_5;
|
||||
@ -2286,6 +2303,41 @@ intel_hsw_signal_levels(uint8_t train_set)
|
||||
}
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
intel_bdw_signal_levels(uint8_t train_set)
|
||||
{
|
||||
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
|
||||
DP_TRAIN_PRE_EMPHASIS_MASK);
|
||||
switch (signal_levels) {
|
||||
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
|
||||
return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
|
||||
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
|
||||
return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
|
||||
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
|
||||
return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
|
||||
|
||||
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
|
||||
return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
|
||||
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
|
||||
return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
|
||||
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
|
||||
return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
|
||||
|
||||
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
|
||||
return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
|
||||
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
|
||||
return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
|
||||
|
||||
case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
|
||||
return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
|
||||
|
||||
default:
|
||||
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
|
||||
"0x%x\n", signal_levels);
|
||||
return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
|
||||
}
|
||||
}
|
||||
|
||||
/* Properly updates "DP" with the correct signal levels. */
|
||||
static void
|
||||
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
|
||||
@ -2296,7 +2348,10 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
|
||||
uint32_t signal_levels, mask;
|
||||
uint8_t train_set = intel_dp->train_set[0];
|
||||
|
||||
if (HAS_DDI(dev)) {
|
||||
if (IS_BROADWELL(dev)) {
|
||||
signal_levels = intel_bdw_signal_levels(train_set);
|
||||
mask = DDI_BUF_EMP_MASK;
|
||||
} else if (IS_HASWELL(dev)) {
|
||||
signal_levels = intel_hsw_signal_levels(train_set);
|
||||
mask = DDI_BUF_EMP_MASK;
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
@ -3284,7 +3339,8 @@ bool intel_dpd_is_edp(struct drm_device *dev)
|
||||
p_child = dev_priv->vbt.child_dev + i;
|
||||
|
||||
if (p_child->common.dvo_port == PORT_IDPD &&
|
||||
p_child->common.device_type == DEVICE_TYPE_eDP)
|
||||
(p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
|
||||
(DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -156,6 +156,17 @@ struct intel_encoder {
|
||||
struct intel_panel {
|
||||
struct drm_display_mode *fixed_mode;
|
||||
int fitting_mode;
|
||||
|
||||
/* backlight */
|
||||
struct {
|
||||
bool present;
|
||||
u32 level;
|
||||
u32 max;
|
||||
bool enabled;
|
||||
bool combination_mode; /* gen 2/4 only */
|
||||
bool active_low_pwm;
|
||||
struct backlight_device *device;
|
||||
} backlight;
|
||||
};
|
||||
|
||||
struct intel_connector {
|
||||
@ -630,6 +641,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
|
||||
struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
|
||||
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
|
||||
struct drm_crtc *crtc);
|
||||
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
|
||||
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
|
||||
@ -803,11 +815,13 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
|
||||
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
|
||||
struct intel_crtc_config *pipe_config,
|
||||
int fitting_mode);
|
||||
void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max);
|
||||
void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
|
||||
u32 max);
|
||||
int intel_panel_setup_backlight(struct drm_connector *connector);
|
||||
void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe);
|
||||
void intel_panel_disable_backlight(struct drm_device *dev);
|
||||
void intel_panel_destroy_backlight(struct drm_device *dev);
|
||||
void intel_panel_enable_backlight(struct intel_connector *connector);
|
||||
void intel_panel_disable_backlight(struct intel_connector *connector);
|
||||
void intel_panel_destroy_backlight(struct drm_connector *connector);
|
||||
void intel_panel_init_backlight_funcs(struct drm_device *dev);
|
||||
enum drm_connector_status intel_panel_detect(struct drm_device *dev);
|
||||
|
||||
|
||||
|
@ -847,7 +847,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
|
||||
|
||||
if (IS_G4X(dev))
|
||||
return 165000;
|
||||
else if (IS_HASWELL(dev))
|
||||
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
|
||||
return 300000;
|
||||
else
|
||||
return 225000;
|
||||
|
@ -206,7 +206,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct intel_connector *intel_connector =
|
||||
&lvds_encoder->attached_connector->base;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 ctl_reg, stat_reg;
|
||||
|
||||
@ -225,13 +226,15 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
|
||||
if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
|
||||
DRM_ERROR("timed out waiting for panel to power on\n");
|
||||
|
||||
intel_panel_enable_backlight(dev, intel_crtc->pipe);
|
||||
intel_panel_enable_backlight(intel_connector);
|
||||
}
|
||||
|
||||
static void intel_disable_lvds(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
|
||||
struct intel_connector *intel_connector =
|
||||
&lvds_encoder->attached_connector->base;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 ctl_reg, stat_reg;
|
||||
|
||||
@ -243,7 +246,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
|
||||
stat_reg = PP_STATUS;
|
||||
}
|
||||
|
||||
intel_panel_disable_backlight(dev);
|
||||
intel_panel_disable_backlight(intel_connector);
|
||||
|
||||
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
|
||||
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
|
||||
|
@ -396,6 +396,9 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
|
||||
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_connector *connector;
|
||||
struct intel_connector *intel_connector;
|
||||
struct intel_panel *panel;
|
||||
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
|
||||
|
||||
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
|
||||
@ -407,10 +410,24 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
|
||||
if (bclp > 255)
|
||||
return ASLC_BACKLIGHT_FAILED;
|
||||
|
||||
mutex_lock(&dev->mode_config.mutex);
|
||||
|
||||
/*
|
||||
* Update backlight on all connectors that support backlight (usually
|
||||
* only one).
|
||||
*/
|
||||
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
|
||||
intel_panel_set_backlight(dev, bclp, 255);
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
intel_connector = to_intel_connector(connector);
|
||||
panel = &intel_connector->panel;
|
||||
if (panel->backlight.present)
|
||||
intel_panel_set_backlight(intel_connector, bclp, 255);
|
||||
}
|
||||
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
|
||||
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -486,9 +503,13 @@ static u32 asle_isct_state(struct drm_device *dev)
|
||||
return ASLC_ISCT_STATE_FAILED;
|
||||
}
|
||||
|
||||
void intel_opregion_asle_intr(struct drm_device *dev)
|
||||
static void asle_work(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_opregion *opregion =
|
||||
container_of(work, struct intel_opregion, asle_work);
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(opregion, struct drm_i915_private, opregion);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
|
||||
u32 aslc_stat = 0;
|
||||
u32 aslc_req;
|
||||
@ -535,6 +556,14 @@ void intel_opregion_asle_intr(struct drm_device *dev)
|
||||
iowrite32(aslc_stat, &asle->aslc);
|
||||
}
|
||||
|
||||
void intel_opregion_asle_intr(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (dev_priv->opregion.asle)
|
||||
schedule_work(&dev_priv->opregion.asle_work);
|
||||
}
|
||||
|
||||
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
|
||||
#define ACPI_EV_LID (1<<1)
|
||||
#define ACPI_EV_DOCK (1<<2)
|
||||
@ -735,6 +764,8 @@ void intel_opregion_fini(struct drm_device *dev)
|
||||
if (opregion->asle)
|
||||
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
|
||||
|
||||
cancel_work_sync(&dev_priv->opregion.asle_work);
|
||||
|
||||
if (opregion->acpi) {
|
||||
iowrite32(0, &opregion->acpi->drdy);
|
||||
|
||||
@ -828,6 +859,10 @@ int intel_opregion_setup(struct drm_device *dev)
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
INIT_WORK(&opregion->asle_work, asle_work);
|
||||
#endif
|
||||
|
||||
base = acpi_os_ioremap(asls, OPREGION_SIZE);
|
||||
if (!base)
|
||||
return -ENOMEM;
|
||||
|
File diff suppressed because it is too large
@ -2291,7 +2291,9 @@ static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
|
||||
|
||||
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
|
||||
{
|
||||
if (INTEL_INFO(dev)->gen >= 7)
|
||||
if (INTEL_INFO(dev)->gen >= 8)
|
||||
return 3072;
|
||||
else if (INTEL_INFO(dev)->gen >= 7)
|
||||
return 768;
|
||||
else
|
||||
return 512;
|
||||
@ -2336,7 +2338,9 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
|
||||
}
|
||||
|
||||
/* clamp to max that the registers can hold */
|
||||
if (INTEL_INFO(dev)->gen >= 7)
|
||||
if (INTEL_INFO(dev)->gen >= 8)
|
||||
max = level == 0 ? 255 : 2047;
|
||||
else if (INTEL_INFO(dev)->gen >= 7)
|
||||
/* IVB/HSW primary/sprite plane watermarks */
|
||||
max = level == 0 ? 127 : 1023;
|
||||
else if (!is_sprite)
|
||||
@ -2366,10 +2370,13 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
|
||||
}
|
||||
|
||||
/* Calculate the maximum FBC watermark */
|
||||
static unsigned int ilk_fbc_wm_max(void)
|
||||
static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
|
||||
{
|
||||
/* max that registers can hold */
|
||||
return 15;
|
||||
if (INTEL_INFO(dev)->gen >= 8)
|
||||
return 31;
|
||||
else
|
||||
return 15;
|
||||
}
|
||||
|
||||
static void ilk_compute_wm_maximums(struct drm_device *dev,
|
||||
@ -2381,7 +2388,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
|
||||
max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
|
||||
max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
|
||||
max->cur = ilk_cursor_wm_max(dev, level, config);
|
||||
max->fbc = ilk_fbc_wm_max();
|
||||
max->fbc = ilk_fbc_wm_max(dev);
|
||||
}
|
||||
|
||||
static bool ilk_validate_wm_level(int level,
|
||||
@ -2722,10 +2729,18 @@ static void hsw_compute_wm_results(struct drm_device *dev,
|
||||
if (!r->enable)
|
||||
break;
|
||||
|
||||
results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
|
||||
r->fbc_val,
|
||||
r->pri_val,
|
||||
r->cur_val);
|
||||
results->wm_lp[wm_lp - 1] = WM3_LP_EN |
|
||||
((level * 2) << WM1_LP_LATENCY_SHIFT) |
|
||||
(r->pri_val << WM1_LP_SR_SHIFT) |
|
||||
r->cur_val;
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 8)
|
||||
results->wm_lp[wm_lp - 1] |=
|
||||
r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
|
||||
else
|
||||
results->wm_lp[wm_lp - 1] |=
|
||||
r->fbc_val << WM1_LP_FBC_SHIFT;
|
||||
|
||||
results->wm_lp_spr[wm_lp - 1] = r->spr_val;
|
||||
}
|
||||
|
||||
@ -3710,6 +3725,78 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
|
||||
I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
|
||||
}
|
||||
|
||||
static void gen8_enable_rps(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring;
|
||||
uint32_t rc6_mask = 0, rp_state_cap;
|
||||
int unused;
|
||||
|
||||
/* 1a: Software RC state - RC0 */
|
||||
I915_WRITE(GEN6_RC_STATE, 0);
|
||||
|
||||
/* 1c & 1d: Get forcewake during program sequence. Although the driver
|
||||
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
|
||||
gen6_gt_force_wake_get(dev_priv);
|
||||
|
||||
/* 2a: Disable RC states. */
|
||||
I915_WRITE(GEN6_RC_CONTROL, 0);
|
||||
|
||||
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
|
||||
|
||||
/* 2b: Program RC6 thresholds.*/
|
||||
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
|
||||
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
|
||||
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
|
||||
for_each_ring(ring, dev_priv, unused)
|
||||
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
|
||||
I915_WRITE(GEN6_RC_SLEEP, 0);
|
||||
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
|
||||
|
||||
/* 3: Enable RC6 */
|
||||
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
|
||||
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
|
||||
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
|
||||
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
|
||||
GEN6_RC_CTL_EI_MODE(1) |
|
||||
rc6_mask);
|
||||
|
||||
/* 4 Program defaults and thresholds for RPS*/
|
||||
I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
|
||||
I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
|
||||
/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
|
||||
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
|
||||
|
||||
/* Docs recommend 900MHz, and 300 MHz respectively */
|
||||
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
|
||||
dev_priv->rps.max_delay << 24 |
|
||||
dev_priv->rps.min_delay << 16);
|
||||
|
||||
I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
|
||||
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
|
||||
I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
|
||||
I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
|
||||
|
||||
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
|
||||
|
||||
/* 5: Enable RPS */
|
||||
I915_WRITE(GEN6_RP_CONTROL,
|
||||
GEN6_RP_MEDIA_TURBO |
|
||||
GEN6_RP_MEDIA_HW_NORMAL_MODE |
|
||||
GEN6_RP_MEDIA_IS_GFX |
|
||||
GEN6_RP_ENABLE |
|
||||
GEN6_RP_UP_BUSY_AVG |
|
||||
GEN6_RP_DOWN_IDLE_AVG);
|
||||
|
||||
/* 6: Ring frequency + overclocking (our driver does this later */
|
||||
|
||||
gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
|
||||
|
||||
gen6_enable_rps_interrupts(dev);
|
||||
|
||||
gen6_gt_force_wake_put(dev_priv);
|
||||
}
|
||||
|
||||
static void gen6_enable_rps(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
@ -3872,7 +3959,10 @@ void gen6_update_ring_freq(struct drm_device *dev)
|
||||
int diff = dev_priv->rps.max_delay - gpu_freq;
|
||||
unsigned int ia_freq = 0, ring_freq = 0;
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
if (INTEL_INFO(dev)->gen >= 8) {
|
||||
/* max(2 * GT, DDR). NB: GT is 50MHz units */
|
||||
ring_freq = max(min_ring_freq, gpu_freq);
|
||||
} else if (IS_HASWELL(dev)) {
|
||||
ring_freq = mult_frac(gpu_freq, 5, 4);
|
||||
ring_freq = max(min_ring_freq, ring_freq);
|
||||
/* leave ia_freq as the default, chosen by cpufreq */
|
||||
@ -4818,6 +4908,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
|
||||
|
||||
if (IS_VALLEYVIEW(dev)) {
|
||||
valleyview_enable_rps(dev);
|
||||
} else if (IS_BROADWELL(dev)) {
|
||||
gen8_enable_rps(dev);
|
||||
gen6_update_ring_freq(dev);
|
||||
} else {
|
||||
gen6_enable_rps(dev);
|
||||
gen6_update_ring_freq(dev);
|
||||
@ -5126,6 +5219,50 @@ static void lpt_suspend_hw(struct drm_device *dev)
|
||||
}
|
||||
}
|
||||
|
||||
static void gen8_init_clock_gating(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
enum pipe i;
|
||||
|
||||
I915_WRITE(WM3_LP_ILK, 0);
|
||||
I915_WRITE(WM2_LP_ILK, 0);
|
||||
I915_WRITE(WM1_LP_ILK, 0);
|
||||
|
||||
/* FIXME(BDW): Check all the w/a, some might only apply to
|
||||
* pre-production hw. */
|
||||
|
||||
WARN(!i915_preliminary_hw_support,
|
||||
"GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
|
||||
I915_WRITE(HALF_SLICE_CHICKEN3,
|
||||
_MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
|
||||
I915_WRITE(HALF_SLICE_CHICKEN3,
|
||||
_MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
|
||||
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
|
||||
|
||||
I915_WRITE(_3D_CHICKEN3,
|
||||
_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
|
||||
|
||||
I915_WRITE(COMMON_SLICE_CHICKEN2,
|
||||
_MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
|
||||
|
||||
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
|
||||
_MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
|
||||
|
||||
/* WaSwitchSolVfFArbitrationPriority */
|
||||
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
|
||||
|
||||
/* WaPsrDPAMaskVBlankInSRD */
|
||||
I915_WRITE(CHICKEN_PAR1_1,
|
||||
I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
|
||||
|
||||
/* WaPsrDPRSUnmaskVBlankInSRD */
|
||||
for_each_pipe(i) {
|
||||
I915_WRITE(CHICKEN_PIPESL_1(i),
|
||||
I915_READ(CHICKEN_PIPESL_1(i) |
|
||||
DPRS_MASK_VBLANK_SRD));
|
||||
}
|
||||
}
|
||||
|
||||
static void haswell_init_clock_gating(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
@ -5476,7 +5613,9 @@ static bool is_always_on_power_domain(struct drm_device *dev,
|
||||
|
||||
BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
if (IS_BROADWELL(dev)) {
|
||||
always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
|
||||
} else if (IS_HASWELL(dev)) {
|
||||
always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
|
||||
} else {
|
||||
WARN_ON(1);
|
||||
@ -5510,6 +5649,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
bool is_enabled, enable_requested;
|
||||
unsigned long irqflags;
|
||||
uint32_t tmp;
|
||||
|
||||
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
|
||||
@ -5527,9 +5667,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
|
||||
HSW_PWR_WELL_STATE_ENABLED), 20))
|
||||
DRM_ERROR("Timeout enabling power well\n");
|
||||
}
|
||||
|
||||
if (IS_BROADWELL(dev)) {
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
|
||||
I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
|
||||
dev_priv->de_irq_mask[PIPE_B]);
|
||||
I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
|
||||
~dev_priv->de_irq_mask[PIPE_B] |
|
||||
GEN8_PIPE_VBLANK);
|
||||
I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
|
||||
dev_priv->de_irq_mask[PIPE_C]);
|
||||
I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
|
||||
~dev_priv->de_irq_mask[PIPE_C] |
|
||||
GEN8_PIPE_VBLANK);
|
||||
POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
}
|
||||
} else {
|
||||
if (enable_requested) {
|
||||
unsigned long irqflags;
|
||||
enum pipe p;
|
||||
|
||||
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
|
||||
@ -5798,6 +5953,8 @@ void intel_init_pm(struct drm_device *dev)
|
||||
dev_priv->display.update_wm = NULL;
|
||||
}
|
||||
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
|
||||
} else if (INTEL_INFO(dev)->gen == 8) {
|
||||
dev_priv->display.init_clock_gating = gen8_init_clock_gating;
|
||||
} else
|
||||
dev_priv->display.update_wm = NULL;
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
@ -5949,4 +6106,3 @@ void intel_pm_init(struct drm_device *dev)
|
||||
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
|
||||
intel_gen6_powersave_work);
|
||||
}
|
||||
|
||||
|
@ -360,6 +360,47 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gen8_render_ring_flush(struct intel_ring_buffer *ring,
|
||||
u32 invalidate_domains, u32 flush_domains)
|
||||
{
|
||||
u32 flags = 0;
|
||||
u32 scratch_addr = ring->scratch.gtt_offset + 128;
|
||||
int ret;
|
||||
|
||||
flags |= PIPE_CONTROL_CS_STALL;
|
||||
|
||||
if (flush_domains) {
|
||||
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
|
||||
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
|
||||
}
|
||||
if (invalidate_domains) {
|
||||
flags |= PIPE_CONTROL_TLB_INVALIDATE;
|
||||
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
|
||||
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
|
||||
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
|
||||
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
|
||||
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
|
||||
flags |= PIPE_CONTROL_QW_WRITE;
|
||||
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
|
||||
}
|
||||
|
||||
ret = intel_ring_begin(ring, 6);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
|
||||
intel_ring_emit(ring, flags);
|
||||
intel_ring_emit(ring, scratch_addr);
|
||||
intel_ring_emit(ring, 0);
|
||||
intel_ring_emit(ring, 0);
|
||||
intel_ring_emit(ring, 0);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static void ring_write_tail(struct intel_ring_buffer *ring,
|
||||
u32 value)
|
||||
{
|
||||
@ -924,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
|
||||
} else if (IS_GEN6(ring->dev)) {
|
||||
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
|
||||
} else {
|
||||
/* XXX: gen8 returns to sanity */
|
||||
mmio = RING_HWS_PGA(ring->mmio_base);
|
||||
}
|
||||
|
||||
@ -1066,6 +1108,52 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
|
||||
}
|
||||
|
||||
static bool
|
||||
gen8_ring_get_irq(struct intel_ring_buffer *ring)
|
||||
{
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
return false;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
if (ring->irq_refcount++ == 0) {
|
||||
if (HAS_L3_DPF(dev) && ring->id == RCS) {
|
||||
I915_WRITE_IMR(ring,
|
||||
~(ring->irq_enable_mask |
|
||||
GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
|
||||
} else {
|
||||
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
|
||||
}
|
||||
POSTING_READ(RING_IMR(ring->mmio_base));
|
||||
}
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void
|
||||
gen8_ring_put_irq(struct intel_ring_buffer *ring)
|
||||
{
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
if (--ring->irq_refcount == 0) {
|
||||
if (HAS_L3_DPF(dev) && ring->id == RCS) {
|
||||
I915_WRITE_IMR(ring,
|
||||
~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
|
||||
} else {
|
||||
I915_WRITE_IMR(ring, ~0);
|
||||
}
|
||||
POSTING_READ(RING_IMR(ring->mmio_base));
|
||||
}
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
|
||||
}
|
||||
|
||||
static int
|
||||
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
|
||||
u32 offset, u32 length,
|
||||
@ -1624,6 +1712,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
|
||||
return ret;
|
||||
|
||||
cmd = MI_FLUSH_DW;
|
||||
if (INTEL_INFO(ring->dev)->gen >= 8)
|
||||
cmd += 1;
|
||||
/*
|
||||
* Bspec vol 1c.5 - video engine command streamer:
|
||||
* "If ENABLED, all TLBs will be invalidated once the flush
|
||||
@ -1635,9 +1725,38 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
|
||||
MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
|
||||
intel_ring_emit(ring, cmd);
|
||||
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
|
||||
if (INTEL_INFO(ring->dev)->gen >= 8) {
|
||||
intel_ring_emit(ring, 0); /* upper addr */
|
||||
intel_ring_emit(ring, 0); /* value */
|
||||
} else {
|
||||
intel_ring_emit(ring, 0);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
}
|
||||
intel_ring_advance(ring);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
|
||||
u32 offset, u32 len,
|
||||
unsigned flags)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = ring->dev->dev_private;
|
||||
bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
|
||||
!(flags & I915_DISPATCH_SECURE);
|
||||
int ret;
|
||||
|
||||
ret = intel_ring_begin(ring, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* FIXME(BDW): Address space and security selectors. */
|
||||
intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
|
||||
intel_ring_emit(ring, offset);
|
||||
intel_ring_emit(ring, 0);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1697,6 +1816,8 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
|
||||
return ret;
|
||||
|
||||
cmd = MI_FLUSH_DW;
|
||||
if (INTEL_INFO(ring->dev)->gen >= 8)
|
||||
cmd += 1;
|
||||
/*
|
||||
* Bspec vol 1c.3 - blitter engine command streamer:
|
||||
* "If ENABLED, all TLBs will be invalidated once the flush
|
||||
@ -1708,8 +1829,13 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
|
||||
MI_FLUSH_DW_OP_STOREDW;
|
||||
intel_ring_emit(ring, cmd);
|
||||
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
|
||||
intel_ring_emit(ring, 0);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
if (INTEL_INFO(ring->dev)->gen >= 8) {
|
||||
intel_ring_emit(ring, 0); /* upper addr */
|
||||
intel_ring_emit(ring, 0); /* value */
|
||||
} else {
|
||||
intel_ring_emit(ring, 0);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
}
|
||||
intel_ring_advance(ring);
|
||||
|
||||
if (IS_GEN7(dev) && flush)
|
||||
@ -1732,8 +1858,14 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
if (INTEL_INFO(dev)->gen >= 8) {
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
} else {
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
}
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;

@ -1775,6 +1907,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->write_tail = ring_write_tail;
if (IS_HASWELL(dev))
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (IS_GEN8(dev))
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)

@ -1888,7 +2022,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->id = VCS;

ring->write_tail = ring_write_tail;
if (IS_GEN6(dev) || IS_GEN7(dev)) {
if (INTEL_INFO(dev)->gen >= 6) {
ring->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev))

@ -1897,10 +2031,20 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (INTEL_INFO(dev)->gen >= 8) {
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
} else {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
}
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;

@ -1946,10 +2090,18 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (INTEL_INFO(dev)->gen >= 8) {
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
} else {
ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
}
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;

@ -1978,10 +2130,19 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;

if (INTEL_INFO(dev)->gen >= 8) {
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
} else {
ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
}
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;

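The ring-init hunks above all follow the same shape: the gen6 hooks stay as the default and a gen >= 8 branch swaps in the gen8 interrupt and dispatch vfuncs. A minimal standalone sketch of that selection pattern (struct and function names here are illustrative, not the driver's):

#include <stdio.h>

static void gen6_irq_get(void) { puts("gen6 irq_get"); }
static void gen8_irq_get(void) { puts("gen8 irq_get"); }

struct ring {
	void (*irq_get)(void);
};

/* gen6 hooks are the default; gen >= 8 overrides them, as in the hunks above */
static void init_ring(struct ring *ring, int gen)
{
	ring->irq_get = gen6_irq_get;
	if (gen >= 8)
		ring->irq_get = gen8_irq_get;
}

int main(void)
{
	struct ring r;

	init_ring(&r, 8);
	r.irq_get();
	return 0;
}
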
@ -260,14 +260,14 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;

if (IS_HASWELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
else
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;

sprctl |= SPRITE_ENABLE;

if (IS_HASWELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;

intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,

@ -306,7 +306,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,

/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev))
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
else if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);

@ -955,7 +955,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,

obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out_unlock;
}

@ -984,7 +984,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,

obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
ret = -EINVAL;
ret = -ENOENT;
goto out_unlock;
}

@ -1092,6 +1092,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
break;

case 7:
case 8:
if (IS_IVYBRIDGE(dev)) {
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;

@ -93,7 +93,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;

if (IS_HASWELL(dev_priv->dev))
if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_MT_ACK;

@ -112,7 +112,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

/* WaRsForcewakeWaitTC0:ivb,hsw */
__gen6_gt_wait_for_thread_c0(dev_priv);
if (INTEL_INFO(dev_priv->dev)->gen < 8)
__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)

@ -459,6 +460,46 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

static const u32 gen8_shadowed_regs[] = {
FORCEWAKE_MT,
GEN6_RPNSWREQ,
GEN6_RC_VIDEO_FREQ,
RING_TAIL(RENDER_RING_BASE),
RING_TAIL(GEN6_BSD_RING_BASE),
RING_TAIL(VEBOX_RING_BASE),
RING_TAIL(BLT_RING_BASE),
/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
int i;
for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
if (reg == gen8_shadowed_regs[i])
return true;

return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
REG_WRITE_HEADER; \
if (__needs_put) { \
dev_priv->uncore.funcs.force_wake_get(dev_priv); \
} \
__raw_i915_write##x(dev_priv, reg, val); \
if (__needs_put) { \
dev_priv->uncore.funcs.force_wake_put(dev_priv); \
} \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
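Expanded without the macro, the gen8 write path above boils down to: look the register up in the shadowed-register table, and only bracket the MMIO write with a forcewake get/put when it is not shadowed. A standalone sketch of that logic under stub I/O (the register offsets and the print helpers here are illustrative only, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* hypothetical offsets standing in for FORCEWAKE_MT, GEN6_RPNSWREQ, ... */
static const uint32_t gen8_shadowed_regs[] = { 0xa188, 0xa008, 0xa00c };

static bool is_gen8_shadowed(uint32_t reg)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;
	return false;
}

static void force_wake_get(void) { puts("forcewake get"); }
static void force_wake_put(void) { puts("forcewake put"); }
static void raw_write32(uint32_t reg, uint32_t val)
{
	printf("write 0x%08x <- 0x%08x\n", reg, val);
}

/* shadowed registers skip the forcewake round-trip; everything else is bracketed */
static void gen8_write32(uint32_t reg, uint32_t val)
{
	bool needs_fw = !is_gen8_shadowed(reg);

	if (needs_fw)
		force_wake_get();
	raw_write32(reg, val);
	if (needs_fw)
		force_wake_put();
}

int main(void)
{
	gen8_write32(0xa188, 1);   /* shadowed: no forcewake */
	gen8_write32(0x2030, 42);  /* not shadowed: forcewake around the write */
	return 0;
}
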
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)

@ -476,6 +517,7 @@ __gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write

@ -492,7 +534,7 @@ void intel_uncore_init(struct drm_device *dev)
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
} else if (IS_HASWELL(dev)) {
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
} else if (IS_IVYBRIDGE(dev)) {

@ -534,6 +576,16 @@ void intel_uncore_init(struct drm_device *dev)
}

switch (INTEL_INFO(dev)->gen) {
default:
dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
dev_priv->uncore.funcs.mmio_writew = gen8_write16;
dev_priv->uncore.funcs.mmio_writel = gen8_write32;
dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
dev_priv->uncore.funcs.mmio_readb = gen6_read8;
dev_priv->uncore.funcs.mmio_readw = gen6_read16;
dev_priv->uncore.funcs.mmio_readl = gen6_read32;
dev_priv->uncore.funcs.mmio_readq = gen6_read64;
break;
case 7:
case 6:
if (IS_HASWELL(dev)) {

@ -767,6 +819,7 @@ static int gen6_do_reset(struct drm_device *dev)
int intel_gpu_reset(struct drm_device *dev)
{
switch (INTEL_INFO(dev)->gen) {
case 8:
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);

@ -765,8 +765,6 @@ static int mga_crtc_do_set_base(struct drm_crtc *crtc,
}
mgag200_bo_unreserve(bo);

DRM_INFO("mga base %llx\n", gpu_addr);

mga_set_start_address(crtc, (u32)gpu_addr);

return 0;

@ -21,6 +21,7 @@ msm-y := \
msm_drv.o \
msm_fb.o \
msm_gem.o \
msm_gem_prime.o \
msm_gem_submit.o \
msm_gpu.o \
msm_ringbuffer.o

@ -4,16 +4,16 @@
/* Autogenerated file, DO NOT EDIT manually!

This file was generated by the rules-ng-ng headergen tool in this git repository:
http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git

The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)

Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)

@ -317,6 +317,38 @@ static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000

#define REG_A2XX_MH_ARBITER_CONFIG 0x00000a40
#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK 0x0000003f
#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT 0
static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val)
{
return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK;
}
#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_GRANULARITY 0x00000040
#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE 0x00000080
#define A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE 0x00000100
#define A2XX_MH_ARBITER_CONFIG_L2_ARB_CONTROL 0x00000200
#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK 0x00001c00
#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT 10
static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val)
{
return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) & A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK;
}
#define A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE 0x00002000
#define A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE 0x00004000
#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE 0x00008000
#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK 0x003f0000
#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT 16
static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
{
return ((val) << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK;
}
#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE 0x00400000
#define A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE 0x00800000
#define A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE 0x01000000
#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000

#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0

@ -4,16 +4,16 @@
/* Autogenerated file, DO NOT EDIT manually!

This file was generated by the rules-ng-ng headergen tool in this git repository:
http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git

The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)

Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)

@ -637,11 +637,12 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007fc
#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 2
static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val)
#define A3XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
{
return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
}
#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800

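The LINEHALFWIDTH builder above now takes a float and stores it as a fixed-point value with two fractional bits (the val * 4.0 scaling) shifted into the new field position. A small self-contained sketch of that packing, reusing the mask/shift values from the hunk above:

#include <stdint.h>
#include <stdio.h>

#define LINEHALFWIDTH__MASK  0x000007f8
#define LINEHALFWIDTH__SHIFT 3

/* half-width in pixels -> 4.x fixed point, placed in bits 3..10 */
static uint32_t pack_linehalfwidth(float val)
{
	return (((uint32_t)(val * 4.0)) << LINEHALFWIDTH__SHIFT) & LINEHALFWIDTH__MASK;
}

int main(void)
{
	/* 1.5 pixels -> fixed-point 6 -> 6 << 3 = 0x30 */
	printf("0x%08x\n", pack_linehalfwidth(1.5f));
	return 0;
}
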
@ -745,6 +746,7 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
}
#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)

@ -767,7 +769,19 @@ static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
}

#define REG_A3XX_UNKNOWN_20C3 0x000020c3
#define REG_A3XX_RB_ALPHA_REF 0x000020c3
#define A3XX_RB_ALPHA_REF_UINT__MASK 0x0000ff00
#define A3XX_RB_ALPHA_REF_UINT__SHIFT 8
static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val)
{
return ((val) << A3XX_RB_ALPHA_REF_UINT__SHIFT) & A3XX_RB_ALPHA_REF_UINT__MASK;
}
#define A3XX_RB_ALPHA_REF_FLOAT__MASK 0xffff0000
#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT 16
static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
{
return ((util_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
}

static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }

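The new RB_ALPHA_REF register above carries two encodings of the alpha-test reference: an 8-bit integer in bits 8..15 and a 16-bit half-float in bits 16..31. A rough standalone sketch of how the two fields combine; float_to_half() here is only a crude stand-in for the driver's util_float_to_half() and ignores rounding, NaN and infinity:

#include <stdint.h>
#include <stdio.h>

#define ALPHA_REF_UINT__SHIFT  8
#define ALPHA_REF_UINT__MASK   0x0000ff00
#define ALPHA_REF_FLOAT__SHIFT 16
#define ALPHA_REF_FLOAT__MASK  0xffff0000

/* crude float -> half conversion, adequate for ordinary positive values */
static uint16_t float_to_half(float f)
{
	union { float f; uint32_t u; } v = { f };
	uint32_t sign = (v.u >> 16) & 0x8000;
	int32_t exp = (int32_t)((v.u >> 23) & 0xff) - 127 + 15;
	uint32_t mant = (v.u >> 13) & 0x3ff;

	if (exp <= 0)
		return (uint16_t)sign;    /* flush tiny values to zero */
	return (uint16_t)(sign | ((uint32_t)exp << 10) | mant);
}

int main(void)
{
	uint32_t ref = 0x80;   /* integer alpha reference */
	float fref = 0.5f;     /* float alpha reference */
	uint32_t reg = ((ref << ALPHA_REF_UINT__SHIFT) & ALPHA_REF_UINT__MASK) |
		       (((uint32_t)float_to_half(fref) << ALPHA_REF_FLOAT__SHIFT) & ALPHA_REF_FLOAT__MASK);

	printf("RB_ALPHA_REF = 0x%08x\n", reg);
	return 0;
}
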
@ -1002,7 +1016,7 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endi
#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE 0x00000008
#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)

@ -1038,7 +1052,8 @@ static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)

#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000004
#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
#define A3XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)

@ -2074,6 +2089,7 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09

#define REG_A3XX_TEX_SAMP_0 0x00000000
#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)

@ -2134,6 +2150,12 @@ static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
{
return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
}
#define A3XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
#define A3XX_TEX_CONST_0_MIPLVLS__SHIFT 16
static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
}
#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
#define A3XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)

@ -4,16 +4,16 @@
|
||||
/* Autogenerated file, DO NOT EDIT manually!
|
||||
|
||||
This file was generated by the rules-ng-ng headergen tool in this git repository:
|
||||
http://0x04.net/cgit/index.cgi/rules-ng-ng
|
||||
git clone git://0x04.net/rules-ng-ng
|
||||
http://github.com/freedreno/envytools/
|
||||
git clone https://github.com/freedreno/envytools.git
|
||||
|
||||
The rules-ng-ng source files this header was generated from are:
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
|
||||
|
||||
Copyright (C) 2013 by the following authors:
|
||||
- Rob Clark <robdclark@gmail.com> (robclark)
|
||||
|
@ -4,16 +4,16 @@
|
||||
/* Autogenerated file, DO NOT EDIT manually!
|
||||
|
||||
This file was generated by the rules-ng-ng headergen tool in this git repository:
|
||||
http://0x04.net/cgit/index.cgi/rules-ng-ng
|
||||
git clone git://0x04.net/rules-ng-ng
|
||||
http://github.com/freedreno/envytools/
|
||||
git clone https://github.com/freedreno/envytools.git
|
||||
|
||||
The rules-ng-ng source files this header was generated from are:
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 31003 bytes, from 2013-09-19 18:50:16)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9759 bytes, from 2013-09-10 00:52:33)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51983 bytes, from 2013-09-10 00:52:32)
|
||||
|
||||
Copyright (C) 2013 by the following authors:
|
||||
- Rob Clark <robdclark@gmail.com> (robclark)
|
||||
|
@ -4,13 +4,13 @@
|
||||
/* Autogenerated file, DO NOT EDIT manually!
|
||||
|
||||
This file was generated by the rules-ng-ng headergen tool in this git repository:
|
||||
http://0x04.net/cgit/index.cgi/rules-ng-ng
|
||||
git clone git://0x04.net/rules-ng-ng
|
||||
http://github.com/freedreno/envytools/
|
||||
git clone https://github.com/freedreno/envytools.git
|
||||
|
||||
The rules-ng-ng source files this header was generated from are:
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
|
||||
|
@ -4,13 +4,13 @@
|
||||
/* Autogenerated file, DO NOT EDIT manually!
|
||||
|
||||
This file was generated by the rules-ng-ng headergen tool in this git repository:
|
||||
http://0x04.net/cgit/index.cgi/rules-ng-ng
|
||||
git clone git://0x04.net/rules-ng-ng
|
||||
http://github.com/freedreno/envytools/
|
||||
git clone https://github.com/freedreno/envytools.git
|
||||
|
||||
The rules-ng-ng source files this header was generated from are:
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
|
||||
|
@ -4,13 +4,13 @@
|
||||
/* Autogenerated file, DO NOT EDIT manually!
|
||||
|
||||
This file was generated by the rules-ng-ng headergen tool in this git repository:
|
||||
http://0x04.net/cgit/index.cgi/rules-ng-ng
|
||||
git clone git://0x04.net/rules-ng-ng
|
||||
http://github.com/freedreno/envytools/
|
||||
git clone https://github.com/freedreno/envytools.git
|
||||
|
||||
The rules-ng-ng source files this header was generated from are:
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
|
||||
|
@ -4,13 +4,13 @@
|
||||
/* Autogenerated file, DO NOT EDIT manually!
|
||||
|
||||
This file was generated by the rules-ng-ng headergen tool in this git repository:
|
||||
http://0x04.net/cgit/index.cgi/rules-ng-ng
|
||||
git clone git://0x04.net/rules-ng-ng
|
||||
http://github.com/freedreno/envytools/
|
||||
git clone https://github.com/freedreno/envytools.git
|
||||
|
||||
The rules-ng-ng source files this header was generated from are:
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
|
||||
|
@ -4,13 +4,13 @@
|
||||
/* Autogenerated file, DO NOT EDIT manually!
|
||||
|
||||
This file was generated by the rules-ng-ng headergen tool in this git repository:
|
||||
http://0x04.net/cgit/index.cgi/rules-ng-ng
|
||||
git clone git://0x04.net/rules-ng-ng
|
||||
http://github.com/freedreno/envytools/
|
||||
git clone https://github.com/freedreno/envytools.git
|
||||
|
||||
The rules-ng-ng source files this header was generated from are:
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
|
||||
|
@ -4,13 +4,13 @@
/* Autogenerated file, DO NOT EDIT manually!

This file was generated by the rules-ng-ng headergen tool in this git repository:
http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git

The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-10-07 16:36:48)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)

@ -42,28 +42,28 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/


enum mpd4_bpc {
enum mdp4_bpc {
BPC1 = 0,
BPC5 = 1,
BPC6 = 2,
BPC8 = 3,
};

enum mpd4_bpc_alpha {
enum mdp4_bpc_alpha {
BPC1A = 0,
BPC4A = 1,
BPC6A = 2,
BPC8A = 3,
};

enum mpd4_alpha_type {
enum mdp4_alpha_type {
FG_CONST = 0,
BG_CONST = 1,
FG_PIXEL = 2,
BG_PIXEL = 3,
};

enum mpd4_pipe {
enum mdp4_pipe {
VG1 = 0,
VG2 = 1,
RGB1 = 2,

@ -73,13 +73,13 @@ enum mpd4_pipe {
VG4 = 6,
};

enum mpd4_mixer {
enum mdp4_mixer {
MIXER0 = 0,
MIXER1 = 1,
MIXER2 = 2,
};

enum mpd4_mixer_stage_id {
enum mdp4_mixer_stage_id {
STAGE_UNUSED = 0,
STAGE_BASE = 1,
STAGE0 = 2,

@ -194,56 +194,56 @@ static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
|
||||
#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
|
||||
#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
|
||||
}
|
||||
@ -254,56 +254,56 @@ static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id va
|
||||
#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
|
||||
}
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
|
||||
#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
|
||||
static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp4_mixer_stage_id val)
|
||||
{
|
||||
return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
|
||||
}
|
||||
@ -369,7 +369,7 @@ static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x
|
||||
static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
|
||||
#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
|
||||
#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
|
||||
static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
|
||||
static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp4_alpha_type val)
|
||||
{
|
||||
return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
|
||||
}
|
||||
@ -377,7 +377,7 @@ static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
|
||||
#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
|
||||
#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
|
||||
#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
|
||||
static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val)
|
||||
static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp4_alpha_type val)
|
||||
{
|
||||
return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
|
||||
}
|
||||
@ -472,19 +472,19 @@ static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __of
|
||||
static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
|
||||
#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
|
||||
#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
|
||||
static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val)
|
||||
static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp4_bpc val)
|
||||
{
|
||||
return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
|
||||
}
|
||||
#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
|
||||
#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
|
||||
static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val)
|
||||
static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp4_bpc val)
|
||||
{
|
||||
return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
|
||||
}
|
||||
#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
|
||||
#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
|
||||
static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val)
|
||||
static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp4_bpc val)
|
||||
{
|
||||
return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
|
||||
}
|
||||
@ -601,9 +601,9 @@ static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) {
|
||||
|
||||
static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
|
||||
#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
|
||||
#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
|
||||
static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
|
||||
@ -617,7 +617,7 @@ static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
|
||||
return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; }
|
||||
#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
|
||||
#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
|
||||
static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
|
||||
@ -631,7 +631,7 @@ static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
|
||||
return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; }
|
||||
#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
|
||||
#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
|
||||
static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
|
||||
@ -645,7 +645,7 @@ static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
|
||||
return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; }
|
||||
#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
|
||||
#define MDP4_PIPE_DST_XY_Y__SHIFT 16
|
||||
static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
|
||||
@ -659,13 +659,13 @@ static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
|
||||
return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
|
||||
#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
|
||||
#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
|
||||
static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
|
||||
@ -679,7 +679,7 @@ static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
|
||||
return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; }
|
||||
#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
|
||||
#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
|
||||
static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
|
||||
@ -693,7 +693,7 @@ static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
|
||||
return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
|
||||
#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
|
||||
#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
|
||||
static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
|
||||
@ -707,28 +707,28 @@ static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
|
||||
return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
|
||||
#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
|
||||
#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
|
||||
static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val)
|
||||
static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp4_bpc val)
|
||||
{
|
||||
return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
|
||||
}
|
||||
#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
|
||||
#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
|
||||
static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val)
|
||||
static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp4_bpc val)
|
||||
{
|
||||
return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
|
||||
}
|
||||
#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
|
||||
#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
|
||||
static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val)
|
||||
static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp4_bpc val)
|
||||
{
|
||||
return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
|
||||
}
|
||||
#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
|
||||
#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
|
||||
static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val)
|
||||
static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp4_bpc_alpha val)
|
||||
{
|
||||
return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
|
||||
}
|
||||
@ -750,7 +750,7 @@ static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
|
||||
#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
|
||||
#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
|
||||
#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
|
||||
#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
|
||||
static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
|
||||
@ -776,7 +776,7 @@ static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
|
||||
return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
|
||||
}
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
|
||||
#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
|
||||
#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
|
||||
#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
|
||||
@ -789,36 +789,36 @@ static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020
|
||||
#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
|
||||
#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; }
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; }
|
||||
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
|
||||
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
|
||||
static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
|
||||
|
||||
#define REG_MDP4_LCDC 0x000c0000
|
||||
|
||||
|
@ -26,6 +26,7 @@ struct mdp4_crtc {
struct drm_crtc base;
char name[8];
struct drm_plane *plane;
struct drm_plane *planes[8];
int id;
int ovlp;
enum mdp4_dma dma;

@ -50,7 +51,11 @@ struct mdp4_crtc {

/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
struct work_struct pageflip_work;
struct msm_fence_cb pageflip_cb;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
atomic_t pending;

/* the fb that we currently hold a scanout ref to: */
struct drm_framebuffer *fb;

@ -92,7 +97,8 @@ static void update_fb(struct drm_crtc *crtc, bool async,
}
}

static void complete_flip(struct drm_crtc *crtc, bool canceled)
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;

@ -102,11 +108,14 @@ static void complete_flip(struct drm_crtc *crtc, bool canceled)
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp4_crtc->event;
if (event) {
mdp4_crtc->event = NULL;
if (canceled)
event->base.destroy(&event->base);
else
/* if regular vblank case (!file) or if cancel-flip from
* preclose on file that requested flip, then send the
* event:
*/
if (!file || (event->base.file_priv == file)) {
mdp4_crtc->event = NULL;
drm_send_vblank_event(dev, mdp4_crtc->id, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}

@ -115,9 +124,15 @@ static void crtc_flush(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
uint32_t flush = 0;
uint32_t i, flush = 0;

flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane));
for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
struct drm_plane *plane = mdp4_crtc->planes[i];
if (plane) {
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
flush |= pipe2flush(pipe_id);
}
}
flush |= ovlp2flush(mdp4_crtc->ovlp);

DBG("%s: flush=%08x", mdp4_crtc->name, flush);

@ -125,17 +140,29 @@ static void crtc_flush(struct drm_crtc *crtc)
mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}

static void pageflip_worker(struct work_struct *work)
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

atomic_or(pending, &mdp4_crtc->pending);
mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
}

static void pageflip_cb(struct msm_fence_cb *cb)
{
struct mdp4_crtc *mdp4_crtc =
container_of(work, struct mdp4_crtc, pageflip_work);
container_of(cb, struct mdp4_crtc, pageflip_cb);
struct drm_crtc *crtc = &mdp4_crtc->base;
struct drm_framebuffer *fb = crtc->fb;

mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
if (!fb)
return;

mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
crtc_flush(crtc);

/* enable vblank to complete flip: */
mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
request_pending(crtc, PENDING_FLIP);
}

static void unref_fb_worker(struct drm_flip_work *work, void *val)

@ -205,67 +232,69 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp4_kms *mdp4_kms = get_kms(crtc);
int i, ovlp = mdp4_crtc->ovlp;
uint32_t mixer_cfg = 0;
static const enum mdp4_mixer_stage_id stages[] = {
STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
};
/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
[VG1] = 1,
[VG2] = 2,
[RGB1] = 0,
[RGB2] = 0,
[RGB3] = 0,
[VG3] = 3,
[VG4] = 4,

/*
* This probably would also need to be triggered by any attached
* plane when it changes.. for now since we are only using a single
* private plane, the configuration is hard-coded:
*/
};
bool alpha[4]= { false, false, false, false };

mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

/* TODO single register for all CRTCs, so this won't work properly
* when multiple CRTCs are active..
*/
for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
struct drm_plane *plane = mdp4_crtc->planes[i];
if (plane) {
enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
int idx = idxs[pipe_id];
if (idx > 0) {
const struct mdp4_format *format =
to_mdp4_format(msm_framebuffer_format(plane->fb));
alpha[idx-1] = format->alpha_enable;
}
mixer_cfg |= mixercfg(mdp4_crtc->mixer, pipe_id, stages[idx]);
}
}

/* this shouldn't happen.. and seems to cause underflow: */
WARN_ON(!mixer_cfg);

for (i = 0; i < 4; i++) {
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i),
MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST));
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0);
uint32_t op;

if (alpha[i]) {
op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
} else {
op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
}

mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
}

/* TODO single register for all CRTCs, so this won't work properly
* when multiple CRTCs are active..
*/
switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
case VG1:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
break;
case VG2:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
break;
case RGB1:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
break;
case RGB2:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
break;
case RGB3:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
break;
case VG3:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
break;
case VG4:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
break;
default:
WARN_ON("invalid pipe");
break;
}
mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}

@ -377,6 +406,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
|
||||
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_gem_object *obj;
|
||||
unsigned long flags;
|
||||
|
||||
if (mdp4_crtc->event) {
|
||||
dev_err(dev->dev, "already pending flip!\n");
|
||||
@ -385,11 +415,13 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
|
||||
|
||||
obj = msm_framebuffer_bo(new_fb, 0);
|
||||
|
||||
spin_lock_irqsave(&dev->event_lock, flags);
|
||||
mdp4_crtc->event = event;
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
|
||||
update_fb(crtc, true, new_fb);
|
||||
|
||||
return msm_gem_queue_inactive_work(obj,
|
||||
&mdp4_crtc->pageflip_work);
|
||||
return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
|
||||
}
|
||||
|
||||
static int mdp4_crtc_set_property(struct drm_crtc *crtc,
|
||||
@ -498,6 +530,8 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
drm_gem_object_unreference_unlocked(old_bo);
|
||||
}
|
||||
|
||||
request_pending(crtc, PENDING_CURSOR);
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
@ -542,13 +576,21 @@ static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
|
||||
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
|
||||
struct drm_crtc *crtc = &mdp4_crtc->base;
|
||||
struct msm_drm_private *priv = crtc->dev->dev_private;
|
||||
unsigned pending;
|
||||
|
||||
update_cursor(crtc);
|
||||
complete_flip(crtc, false);
|
||||
mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
|
||||
|
||||
drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
|
||||
drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
|
||||
pending = atomic_xchg(&mdp4_crtc->pending, 0);
|
||||
|
||||
if (pending & PENDING_FLIP) {
|
||||
complete_flip(crtc, NULL);
|
||||
drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
|
||||
}
|
||||
|
||||
if (pending & PENDING_CURSOR) {
|
||||
update_cursor(crtc);
|
||||
drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
|
||||
}
|
||||
}
|
||||
|
||||
static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
|
||||
@ -565,9 +607,10 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
|
||||
return mdp4_crtc->vblank.irqmask;
|
||||
}
|
||||
|
||||
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc)
|
||||
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
|
||||
{
|
||||
complete_flip(crtc, true);
|
||||
DBG("cancel: %p", file);
|
||||
complete_flip(crtc, file);
|
||||
}
|
||||
|
||||
/* set dma config, ie. the format the encoder wants. */
|
||||
@ -622,6 +665,32 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
|
||||
mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
|
||||
}
|
||||
|
||||
static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
|
||||
struct drm_plane *plane)
|
||||
{
|
||||
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
|
||||
|
||||
BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
|
||||
|
||||
if (mdp4_crtc->planes[pipe_id] == plane)
|
||||
return;
|
||||
|
||||
mdp4_crtc->planes[pipe_id] = plane;
|
||||
blend_setup(crtc);
|
||||
if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
|
||||
crtc_flush(crtc);
|
||||
}
|
||||
|
||||
void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
|
||||
{
|
||||
set_attach(crtc, mdp4_plane_pipe(plane), plane);
|
||||
}
|
||||
|
||||
void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
|
||||
{
|
||||
set_attach(crtc, mdp4_plane_pipe(plane), NULL);
|
||||
}
|
||||
|
||||
static const char *dma_names[] = {
|
||||
"DMA_P", "DMA_S", "DMA_E",
|
||||
};
|
||||
@ -644,7 +713,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
|
||||
crtc = &mdp4_crtc->base;
|
||||
|
||||
mdp4_crtc->plane = plane;
|
||||
mdp4_crtc->plane->crtc = crtc;
|
||||
|
||||
mdp4_crtc->ovlp = ovlp_id;
|
||||
mdp4_crtc->dma = dma_id;
|
||||
@ -668,7 +736,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
|
||||
ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
|
||||
"unref cursor", unref_cursor_worker);
|
||||
|
||||
INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker);
|
||||
INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
|
||||
|
||||
drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
|
||||
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
|
||||
|
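The mdp4_crtc.c hunks above replace the old pageflip workqueue with a fence callback (pageflip_cb) plus an atomic "pending" bitmask: work is requested from any context with atomic_or(), and the vblank interrupt claims all outstanding work at once with atomic_xchg(). A minimal sketch of that pending-flags pattern follows; the struct, flag values and function names here are hypothetical stand-ins for illustration, not the driver's own definitions.

/* Sketch only, assuming flag bits similar to PENDING_FLIP/PENDING_CURSOR
 * in the mdp4 code; "my_crtc" is a hypothetical stand-in. */
#include <linux/atomic.h>
#include <linux/types.h>

#define MY_PENDING_CURSOR 0x1
#define MY_PENDING_FLIP   0x2

struct my_crtc {
	atomic_t pending;
};

static void my_request_pending(struct my_crtc *c, u32 pending)
{
	/* accumulate work to be finished on the next vblank */
	atomic_or(pending, &c->pending);
}

static void my_vblank_irq(struct my_crtc *c)
{
	/* claim and clear everything that was requested so far */
	u32 pending = atomic_xchg(&c->pending, 0);

	if (pending & MY_PENDING_FLIP)
		;	/* complete the page flip, unref the old fb */
	if (pending & MY_PENDING_CURSOR)
		;	/* latch the new cursor bo */
}

Because the flags are only cleared in the interrupt handler, a flip and a cursor update requested in the same frame are completed together on one vblank, which is the behaviour the hunks above introduce.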
@ -44,6 +44,22 @@ static const struct mdp4_format formats[] = {
FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
};

uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
uint32_t max_formats)
{
uint32_t i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
const struct mdp4_format *f = &formats[i];

if (i == max_formats)
break;

pixel_formats[i] = f->base.pixel_format;
}

return i;
}

const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
{
int i;

@ -135,7 +135,7 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
unsigned i;

for (i = 0; i < priv->num_crtcs; i++)
mdp4_crtc_cancel_pending_flip(priv->crtcs[i]);
mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
}

static void mdp4_destroy(struct msm_kms *kms)
@ -196,6 +196,23 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
* for more than just RGB1->DMA_E->DTV->HDMI
*/

/* construct non-private planes: */
plane = mdp4_plane_init(dev, VG1, false);
if (IS_ERR(plane)) {
dev_err(dev->dev, "failed to construct plane for VG1\n");
ret = PTR_ERR(plane);
goto fail;
}
priv->planes[priv->num_planes++] = plane;

plane = mdp4_plane_init(dev, VG2, false);
if (IS_ERR(plane)) {
dev_err(dev->dev, "failed to construct plane for VG2\n");
ret = PTR_ERR(plane);
goto fail;
}
priv->planes[priv->num_planes++] = plane;

/* the CRTCs get constructed with a private plane: */
plane = mdp4_plane_init(dev, RGB1, true);
if (IS_ERR(plane)) {

@ -75,8 +75,8 @@ struct mdp4_platform_config {

struct mdp4_format {
struct msm_format base;
enum mpd4_bpc bpc_r, bpc_g, bpc_b;
enum mpd4_bpc_alpha bpc_a;
enum mdp4_bpc bpc_r, bpc_g, bpc_b;
enum mdp4_bpc_alpha bpc_a;
uint8_t unpack[4];
bool alpha_enable, unpack_tight;
uint8_t cpp, unpack_count;
@ -93,7 +93,7 @@ static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
return msm_readl(mdp4_kms->mmio + reg);
}

static inline uint32_t pipe2flush(enum mpd4_pipe pipe)
static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
{
switch (pipe) {
case VG1: return MDP4_OVERLAY_FLUSH_VG1;
@ -133,6 +133,48 @@ static inline uint32_t dma2err(enum mdp4_dma dma)
}
}

static inline uint32_t mixercfg(int mixer, enum mdp4_pipe pipe,
enum mdp4_mixer_stage_id stage)
{
uint32_t mixer_cfg = 0;

switch (pipe) {
case VG1:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
break;
case VG2:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
break;
case RGB1:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
break;
case RGB2:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
break;
case RGB3:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
break;
case VG3:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
break;
case VG4:
mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
break;
default:
WARN_ON("invalid pipe");
break;
}

return mixer_cfg;
}

int mdp4_disable(struct mdp4_kms *mdp4_kms);
int mdp4_enable(struct mdp4_kms *mdp4_kms);

@ -146,6 +188,8 @@ void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);

uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *formats,
uint32_t max_formats);
const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);

void mdp4_plane_install_properties(struct drm_plane *plane,
@ -158,14 +202,16 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane);
enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mpd4_pipe pipe_id, bool private_plane);
enum mdp4_pipe pipe_id, bool private_plane);

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc);
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id, int ovlp_id,
enum mdp4_dma dma_id);

@ -22,7 +22,7 @@ struct mdp4_plane {
struct drm_plane base;
const char *name;

enum mpd4_pipe pipe;
enum mdp4_pipe pipe;

uint32_t nformats;
uint32_t formats[32];
@ -61,7 +61,9 @@ static int mdp4_plane_update(struct drm_plane *plane,
static int mdp4_plane_disable(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
DBG("%s: TODO", mdp4_plane->name); // XXX
DBG("%s: disable", mdp4_plane->name);
if (plane->crtc)
mdp4_crtc_detach(plane->crtc, plane);
return 0;
}

@ -101,7 +103,7 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mpd4_pipe pipe = mdp4_plane->pipe;
enum mdp4_pipe pipe = mdp4_plane->pipe;
uint32_t iova;

mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
@ -129,7 +131,7 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
enum mpd4_pipe pipe = mdp4_plane->pipe;
enum mdp4_pipe pipe = mdp4_plane->pipe;
const struct mdp4_format *format;
uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
@ -141,6 +143,10 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
src_w = src_w >> 16;
src_h = src_h >> 16;

DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);

if (src_w != crtc_w) {
op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
/* TODO calc phasex_step */
@ -191,7 +197,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);

plane->crtc = crtc;
/* TODO detach from old crtc (if we had more than one) */
mdp4_crtc_attach(crtc, plane);

return 0;
}
@ -202,7 +209,7 @@ static const char *pipe_names[] = {
"VG3", "VG4",
};

enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
return mdp4_plane->pipe;
@ -210,9 +217,8 @@ enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)

/* initialize plane */
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mpd4_pipe pipe_id, bool private_plane)
enum mdp4_pipe pipe_id, bool private_plane)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_plane *plane = NULL;
struct mdp4_plane *mdp4_plane;
int ret;
@ -228,8 +234,12 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
mdp4_plane->pipe = pipe_id;
mdp4_plane->name = pipe_names[pipe_id];

drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs,
mdp4_plane->formats, mdp4_plane->nformats, private_plane);
mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
ARRAY_SIZE(mdp4_plane->formats));

drm_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
mdp4_plane->formats, mdp4_plane->nformats,
private_plane);

mdp4_plane_install_properties(plane, &plane->base);

@ -187,6 +187,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
init_waitqueue_head(&priv->fence_event);

INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->fence_cbs);

drm_mode_config_init(dev);

@ -539,15 +540,36 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
return ret;
}

/* call under struct_mutex */
/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;

if (fence > priv->completed_fence) {
priv->completed_fence = fence;
wake_up_all(&priv->fence_event);
mutex_lock(&dev->struct_mutex);
priv->completed_fence = max(fence, priv->completed_fence);

while (!list_empty(&priv->fence_cbs)) {
struct msm_fence_cb *cb;

cb = list_first_entry(&priv->fence_cbs,
struct msm_fence_cb, work.entry);

if (cb->fence > priv->completed_fence)
break;

list_del_init(&cb->work.entry);
queue_work(priv->wq, &cb->work);
}

mutex_unlock(&dev->struct_mutex);

wake_up_all(&priv->fence_event);
}

void __msm_fence_worker(struct work_struct *work)
{
struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
cb->func(cb);
}

/*
@ -650,13 +672,13 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
}

static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
@ -680,7 +702,11 @@ static const struct file_operations fops = {
};

static struct drm_driver msm_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
.driver_features = DRIVER_HAVE_IRQ |
DRIVER_GEM |
DRIVER_PRIME |
DRIVER_RENDER |
DRIVER_MODESET,
.load = msm_load,
.unload = msm_unload,
.open = msm_open,
@ -698,6 +724,16 @@ static struct drm_driver msm_driver = {
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
.gem_prime_vmap = msm_gem_prime_vmap,
.gem_prime_vunmap = msm_gem_prime_vunmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
.debugfs_cleanup = msm_debugfs_cleanup,

@ -73,10 +73,16 @@ struct msm_drm_private {

struct workqueue_struct *wq;

/* callbacks deferred until bo is inactive: */
struct list_head fence_cbs;

/* registered IOMMU domains: */
unsigned int num_iommus;
struct iommu_domain *iommus[NUM_DOMAINS];

unsigned int num_planes;
struct drm_plane *planes[8];

unsigned int num_crtcs;
struct drm_crtc *crtcs[8];

@ -94,6 +100,20 @@ struct msm_format {
uint32_t pixel_format;
};

/* callback from wq once fence has passed: */
struct msm_fence_cb {
struct work_struct work;
uint32_t fence;
void (*func)(struct msm_fence_cb *cb);
};

void __msm_fence_worker(struct work_struct *work);

#define INIT_FENCE_CB(_cb, _func) do { \
INIT_WORK(&(_cb)->work, __msm_fence_worker); \
(_cb)->func = _func; \
} while (0)

/* As there are different display controller blocks depending on the
* snapdragon version, the kms support is split out and the appropriate
* implementation is loaded at runtime. The kms module is responsible
@ -141,17 +161,24 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj, int id);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
uint32_t handle);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
size_t size, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_vaddr(struct drm_gem_object *obj);
int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
struct work_struct *work);
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
struct msm_fence_cb *cb);
void msm_gem_move_to_active(struct drm_gem_object *obj,
struct msm_gpu *gpu, bool write, uint32_t fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
@ -163,6 +190,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size, struct sg_table *sgt);

struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
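The msm_drv.h hunks above introduce struct msm_fence_cb: a work_struct plus a fence number, initialized once with INIT_FENCE_CB() and queued against a GEM buffer with msm_gem_queue_inactive_cb(); msm_update_fence() hands it to the driver workqueue once the buffer's fence has passed, and __msm_fence_worker() then invokes the callback. The following is a hedged sketch of how a caller might use this API, mirroring the mdp4 pageflip usage above; "my_output", my_flip_done() and my_queue_flip() are hypothetical names, not part of the driver.

/* Sketch only; assumes the msm_fence_cb API declared in the hunks above. */
struct my_output {
	struct drm_crtc base;
	struct msm_fence_cb flip_cb;	/* fires once the new fb's bo is idle */
};

static void my_flip_done(struct msm_fence_cb *cb)
{
	struct my_output *out = container_of(cb, struct my_output, flip_cb);

	/* runs from the driver workqueue: program the new scanout address,
	 * then let the vblank irq send the flip-complete event */
	(void)out;
}

static void my_output_init(struct my_output *out)
{
	INIT_FENCE_CB(&out->flip_cb, my_flip_done);
}

static int my_queue_flip(struct my_output *out, struct drm_gem_object *new_fb_bo)
{
	/* if the bo is still busy the callback is parked on priv->fence_cbs,
	 * otherwise it is queued immediately */
	return msm_gem_queue_inactive_cb(new_fb_bo, &out->flip_cb);
}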
@ -17,6 +17,7 @@

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_gem.h"
@ -77,6 +78,21 @@ static void put_pages(struct drm_gem_object *obj)
}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct page **p;
mutex_lock(&dev->struct_mutex);
p = get_pages(obj);
mutex_unlock(&dev->struct_mutex);
return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
@ -162,6 +178,11 @@ out:
case 0:
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
/*
* EBUSY is ok: this just means that another thread
* already did the job.
*/
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
@ -293,7 +314,17 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret;

/* this is safe right now because we don't unmap until the
* bo is deleted:
*/
if (msm_obj->domain[id].iova) {
*iova = msm_obj->domain[id].iova;
return 0;
}

mutex_lock(&obj->dev->struct_mutex);
ret = msm_gem_get_iova_locked(obj, id, iova);
mutex_unlock(&obj->dev->struct_mutex);
@ -363,8 +394,11 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
return ret;
}

int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
struct work_struct *work)
/* setup callback for when bo is no longer busy..
* TODO probably want to differentiate read vs write..
*/
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
struct msm_fence_cb *cb)
{
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
@ -372,12 +406,13 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
int ret = 0;

mutex_lock(&dev->struct_mutex);
if (!list_empty(&work->entry)) {
if (!list_empty(&cb->work.entry)) {
ret = -EINVAL;
} else if (is_active(msm_obj)) {
list_add_tail(&work->entry, &msm_obj->inactive_work);
cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
list_add_tail(&cb->work.entry, &priv->fence_cbs);
} else {
queue_work(priv->wq, work);
queue_work(priv->wq, &cb->work);
}
mutex_unlock(&dev->struct_mutex);

@ -410,16 +445,6 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
msm_obj->write_fence = 0;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

while (!list_empty(&msm_obj->inactive_work)) {
struct work_struct *work;

work = list_first_entry(&msm_obj->inactive_work,
struct work_struct, entry);

list_del_init(&work->entry);
queue_work(priv->wq, work);
}
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
@ -510,10 +535,21 @@ void msm_gem_free_object(struct drm_gem_object *obj)

drm_gem_free_mmap_offset(obj);

if (msm_obj->vaddr)
vunmap(msm_obj->vaddr);
if (obj->import_attach) {
if (msm_obj->vaddr)
dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

put_pages(obj);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
*/
if (msm_obj->pages)
drm_free_large(msm_obj->pages);

} else {
if (msm_obj->vaddr)
vunmap(msm_obj->vaddr);
put_pages(obj);
}

if (msm_obj->resv == &msm_obj->_resv)
reservation_object_fini(msm_obj->resv);
@ -549,17 +585,12 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
return ret;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags)
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
int ret;

WARN_ON(!mutex_is_locked(&dev->struct_mutex));

size = PAGE_ALIGN(size);

switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
@ -569,21 +600,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
default:
dev_err(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
ret = -EINVAL;
goto fail;
return -EINVAL;
}

msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
if (!msm_obj) {
ret = -ENOMEM;
goto fail;
}

obj = &msm_obj->base;

ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
if (!msm_obj)
return -ENOMEM;

msm_obj->flags = flags;

@ -591,9 +613,69 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
reservation_object_init(msm_obj->resv);

INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->inactive_work);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

*obj = &msm_obj->base;

return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags)
{
struct drm_gem_object *obj;
int ret;

WARN_ON(!mutex_is_locked(&dev->struct_mutex));

size = PAGE_ALIGN(size);

ret = msm_gem_new_impl(dev, size, flags, &obj);
if (ret)
goto fail;

ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;

return obj;

fail:
if (obj)
drm_gem_object_unreference_unlocked(obj);

return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size, struct sg_table *sgt)
{
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
int ret, npages;

size = PAGE_ALIGN(size);

ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
if (ret)
goto fail;

drm_gem_private_object_init(dev, obj, size);

npages = size / PAGE_SIZE;

msm_obj = to_msm_bo(obj);
msm_obj->sgt = sgt;
msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
if (!msm_obj->pages) {
ret = -ENOMEM;
goto fail;
}

ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
if (ret)
goto fail;

return obj;

fail:
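The msm_gem.c hunks above split buffer creation into msm_gem_new_impl() so that msm_gem_import() can build a private GEM object around an externally provided sg_table, filling the page array via drm_prime_sg_to_page_addr_arrays(). One plausible way the gem_prime_import_sg_table hook registered in msm_drv.c could sit on top of it is sketched below; this glue is an assumption for illustration and is not part of the hunks shown in this diff.

/* Hypothetical sketch: the PRIME import path simply delegates to
 * msm_gem_import(), which wraps the sg_table in a private GEM object. */
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
		size_t size, struct sg_table *sg)
{
	return msm_gem_import(dev, size, sg);
}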
@ -45,9 +45,6 @@ struct msm_gem_object {
*/
struct list_head submit_entry;

/* work defered until bo is inactive: */
struct list_head inactive_work;

struct page **pages;
struct sg_table *sgt;
void *vaddr;
Some files were not shown because too many files have changed in this diff.