Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (177 commits)
  drm/radeon: fixup refcounts in radeon dumb create ioctl.
  drm: radeon: *_cs_packet_parse_vline() cleanup
  radeon: merge list_del()/list_add_tail() to list_move_tail()
  drm: Retry i2c transfer of EDID block after failure
  drm/radeon/kms: fix typo in atom overscan setup
  drm: Hold the mode mutex whilst probing for sysfs status
  drm/nouveau: fix __nouveau_fence_wait performance
  drm/nv40: attempt to reserve just enough vram for all 32 channels
  drm/nv50: check for vm traps on every gr irq
  drm/nv50: decode vm faults some more
  drm/nouveau: add nouveau_enum_find() util function
  drm/nouveau: properly handle pushbuffer check failures
  drm/nvc0: remove vm hack forcing large/small pages to not share a PDE
  drm/i915: disable opregion lid detection for now.
  drm/i915: Only wait on a pending flip if we intend to write to the buffer
  drm/i915/dp: Sanity check eDP existence
  drm: add cap bit to denote if dumb ioctl is available or not.
  drm/core: add ioctl to query device/driver capabilities
  drm/radeon/kms: allow max clock of 340 Mhz on hdmi 1.3+
  drm/radeon/kms: add cayman pci ids
  ...
Linus Torvalds 2011-03-17 09:11:39 -07:00
commit 40c7f2112c
181 changed files with 9280 additions and 6447 deletions


@ -73,32 +73,17 @@ source "drivers/gpu/drm/radeon/Kconfig"
config DRM_I810
tristate "Intel I810"
# BKL usage in order to avoid AB-BA deadlocks, may become BROKEN_ON_SMP
depends on DRM && AGP && AGP_INTEL && BKL
# !PREEMPT because of missing ioctl locking
depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
selected, the module will be called i810. AGP support is required
for this driver to work.
choice
prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
depends on DRM && AGP && AGP_INTEL
optional
config DRM_I830
tristate "i830 driver"
# BKL usage in order to avoid AB-BA deadlocks, i830 may get removed
depends on BKL
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM or 865G integrated graphics. If M is selected, the
module will be called i830. AGP support is required for this driver
to work. This driver is used by the older X releases X.org 6.7 and
XFree86 4.3. If unsure, build this and i915 as modules and the X server
will load the correct one.
config DRM_I915
tristate "i915 driver"
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
depends on DRM
depends on AGP
depends on AGP_INTEL
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@ -115,12 +100,20 @@ config DRM_I915
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
module will be called i915. AGP support is required for this driver
to work. This driver is used by the Intel driver in X.org 6.8 and
XFree86 4.4 and above. If unsure, build this and i830 as modules and
the X server will load the correct one.
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
Core i5, Core i7 as well as Atom CPUs with integrated graphics.
If M is selected, the module will be called i915. AGP support
is required for this driver to work. This driver is used by
the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
replaces the older i830 module that supported a subset of the
hardware in older X.org releases.
Note that the older i810/i815 chipsets require the use of the
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
@ -132,8 +125,6 @@ config DRM_I915_KMS
the driver to bind to PCI devices, which precludes loading things
like intelfb.
endchoice
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM && PCI


@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o
drm_trace_points.o drm_global.o drm_usb.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
@ -29,7 +29,6 @@ obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
obj-$(CONFIG_DRM_I830) += i830/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/


@ -2694,3 +2694,36 @@ void drm_mode_config_reset(struct drm_device *dev)
connector->funcs->reset(connector);
}
EXPORT_SYMBOL(drm_mode_config_reset);
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_create_dumb *args = data;
if (!dev->driver->dumb_create)
return -ENOSYS;
return dev->driver->dumb_create(file_priv, dev, args);
}
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_map_dumb *args = data;
/* call driver ioctl to get mmap offset */
if (!dev->driver->dumb_map_offset)
return -ENOSYS;
return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
}
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_destroy_dumb *args = data;
if (!dev->driver->dumb_destroy)
return -ENOSYS;
return dev->driver->dumb_destroy(file_priv, dev, args->handle);
}
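For context, a minimal userspace sketch of how these three hooks are meant to be driven (illustrative only, not part of this series; the open DRM file descriptor, the buffer geometry and the helper name are assumptions):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Hypothetical helper: create a dumb buffer, look up its fake mmap offset
 * and map it.  Returns MAP_FAILED on any error. */
static void *create_and_map_dumb(int fd, uint32_t width, uint32_t height,
				 uint32_t *pitch)
{
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;

	memset(&create, 0, sizeof(create));
	create.width = width;
	create.height = height;
	create.bpp = 32;
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return MAP_FAILED;	/* -ENOSYS if the driver lacks ->dumb_create */

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return MAP_FAILED;	/* -ENOSYS if the driver lacks ->dumb_map_offset */

	*pitch = create.pitch;
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, map.offset);
}

Drivers that implement none of the dumb_* hooks make each ioctl fail with -ENOSYS, which is what the new DRM_CAP_DUMB_BUFFER capability added later in this series lets userspace detect up front.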


@ -67,6 +67,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@ -150,7 +151,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@ -234,49 +238,6 @@ int drm_lastclose(struct drm_device * dev)
return 0;
}
/**
* Module initialization. Called via init_module at module load time, or via
* linux/init/main.c (this is not currently supported).
*
* \return zero on success or a negative number on failure.
*
* Initializes an array of drm_device structures, and attempts to
* initialize all available devices, using consecutive minors, registering the
* stubs and initializing the device.
*
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
int drm_init(struct drm_driver *driver)
{
DRM_DEBUG("\n");
INIT_LIST_HEAD(&driver->device_list);
if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
return drm_platform_init(driver);
else
return drm_pci_init(driver);
}
EXPORT_SYMBOL(drm_init);
void drm_exit(struct drm_driver *driver)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
if (driver->driver_features & DRIVER_MODESET) {
pci_unregister_driver(&driver->pci_driver);
} else {
list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
drm_put_dev(dev);
}
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_exit);
/** File operations structure */
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,


@ -230,24 +230,32 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
int block, int len)
{
unsigned char start = block * EDID_LENGTH;
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = len,
.buf = buf,
}
};
int ret, retries = 5;
if (i2c_transfer(adapter, msgs, 2) == 2)
return 0;
/* The core i2c driver will automatically retry the transfer if the
* adapter reports EAGAIN. However, we find that bit-banging transfers
* are susceptible to errors under a heavily loaded machine and
* generate spurious NAKs and timeouts. Retrying the transfer
* of the individual block a few times seems to overcome this.
*/
do {
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = len,
.buf = buf,
}
};
ret = i2c_transfer(adapter, msgs, 2);
} while (ret != 2 && --retries);
return -1;
return ret == 2 ? 0 : -1;
}
static u8 *
@ -449,12 +457,11 @@ static void edid_fixup_preferred(struct drm_connector *connector,
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
struct drm_display_mode *mode = NULL;
int i;
struct drm_display_mode *ptr, *mode;
mode = NULL;
for (i = 0; i < drm_num_dmt_modes; i++) {
ptr = &drm_dmt_modes[i];
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
fresh == drm_mode_vrefresh(ptr)) {
@ -885,7 +892,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
}
static bool
mode_is_rb(struct drm_display_mode *mode)
mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
@ -894,7 +901,8 @@ mode_is_rb(struct drm_display_mode *mode)
}
static bool
mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
mode_in_hsync_range(const struct drm_display_mode *mode,
struct edid *edid, u8 *t)
{
int hsync, hmin, hmax;
@ -910,7 +918,8 @@ mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
}
static bool
mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
mode_in_vsync_range(const struct drm_display_mode *mode,
struct edid *edid, u8 *t)
{
int vsync, vmin, vmax;
@ -941,7 +950,7 @@ range_pixel_clock(struct edid *edid, u8 *t)
}
static bool
mode_in_range(struct drm_display_mode *mode, struct edid *edid,
mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
struct detailed_timing *timing)
{
u32 max_clock;
@ -1472,7 +1481,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay)
{
int i, count, num_modes = 0;
struct drm_display_mode *mode, *ptr;
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@ -1482,7 +1491,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
vdisplay = 0;
for (i = 0; i < count; i++) {
ptr = &drm_dmt_modes[i];
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hdisplay && vdisplay) {
/*
* Only when two are valid, they will be used to check


@ -32,7 +32,7 @@
* This table is copied from xfree86/modes/xf86EdidModes.c.
* But the mode with Reduced blank feature is deleted.
*/
static struct drm_display_mode drm_dmt_modes[] = {
static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
@ -266,7 +266,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
static struct drm_display_mode edid_est_modes[] = {
static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */


@ -627,6 +627,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
if (info->var.transp.length > 0) {
u32 mask = (1 << info->var.transp.length) - 1;
mask <<= info->var.transp.offset;
value |= mask;
}
palette[regno] = value;
return 0;
}


@ -101,7 +101,7 @@ drm_gem_init(struct drm_device *dev)
dev->mm_private = mm;
if (drm_ht_create(&mm->offset_hash, 19)) {
if (drm_ht_create(&mm->offset_hash, 12)) {
kfree(mm);
return -ENOMEM;
}
@ -181,7 +181,7 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
/**
* Removes the mapping from handle to filp for this object.
*/
static int
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_device *dev;
@ -214,6 +214,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
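drm_gem_handle_delete() is exported here so GEM drivers can implement the new dumb_destroy hook (see drm_mode_destroy_dumb_ioctl() earlier) without duplicating handle bookkeeping. A hedged sketch of such a callback, with a made-up driver prefix:

#include "drmP.h"

/* Hypothetical driver implementation of the ->dumb_destroy hook: a dumb
 * buffer is just a GEM object, so dropping its handle is sufficient. */
static int foo_dumb_destroy(struct drm_file *file_priv,
			    struct drm_device *dev,
			    uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}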
/**
* Create a handle for this object. This adds a handle reference


@ -39,27 +39,18 @@
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
unsigned int i;
unsigned int size = 1 << order;
ht->size = 1 << order;
ht->order = order;
ht->fill = 0;
ht->table = NULL;
ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
if (!ht->use_vmalloc) {
ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
}
if (!ht->table) {
ht->use_vmalloc = 1;
ht->table = vmalloc(ht->size*sizeof(*ht->table));
}
if (size <= PAGE_SIZE / sizeof(*ht->table))
ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
else
ht->table = vzalloc(size*sizeof(*ht->table));
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
for (i=0; i< ht->size; ++i) {
INIT_HLIST_HEAD(&ht->table[i]);
}
return 0;
}
EXPORT_SYMBOL(drm_ht_create);
@ -180,7 +171,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
hlist_del_init(list);
ht->fill--;
return 0;
}
return -EINVAL;
@ -189,7 +179,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init(&item->head);
ht->fill--;
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
@ -197,10 +186,10 @@ EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
if (ht->use_vmalloc)
vfree(ht->table);
else
if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order)
kfree(ht->table);
else
vfree(ht->table);
ht->table = NULL;
}
}


@ -47,30 +47,19 @@ int drm_name_info(struct seq_file *m, void *data)
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
struct drm_master *master = minor->master;
const char *bus_name;
if (!master)
return 0;
if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
if (master->unique) {
seq_printf(m, "%s %s %s\n",
dev->driver->platform_device->name,
dev_name(dev->dev), master->unique);
} else {
seq_printf(m, "%s\n",
dev->driver->platform_device->name);
}
bus_name = dev->driver->bus->get_name(dev);
if (master->unique) {
seq_printf(m, "%s %s %s\n",
bus_name,
dev_name(dev->dev), master->unique);
} else {
if (master->unique) {
seq_printf(m, "%s %s %s\n",
dev->driver->pci_driver.name,
dev_name(dev->dev), master->unique);
} else {
seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
dev_name(dev->dev));
}
seq_printf(m, "%s %s\n",
bus_name, dev_name(dev->dev));
}
return 0;
}


@ -96,7 +96,7 @@ int drm_setunique(struct drm_device *dev, void *data,
{
struct drm_unique *u = data;
struct drm_master *master = file_priv->master;
int domain, bus, slot, func, ret;
int ret;
if (master->unique_len || master->unique)
return -EBUSY;
@ -104,50 +104,12 @@ int drm_setunique(struct drm_device *dev, void *data,
if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (!master->unique) {
ret = -ENOMEM;
if (!dev->driver->bus->set_unique)
return -EINVAL;
ret = dev->driver->bus->set_unique(dev, master, u);
if (ret)
goto err;
}
if (copy_from_user(master->unique, u->unique, master->unique_len)) {
ret = -EFAULT;
goto err;
}
master->unique[master->unique_len] = '\0';
dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
strlen(master->unique) + 2, GFP_KERNEL);
if (!dev->devname) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
master->unique);
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3) {
ret = -EINVAL;
goto err;
}
domain = bus >> 8;
bus &= 0xff;
if ((domain != drm_get_pci_domain(dev)) ||
(bus != dev->pdev->bus->number) ||
(slot != PCI_SLOT(dev->pdev->devfn)) ||
(func != PCI_FUNC(dev->pdev->devfn))) {
ret = -EINVAL;
goto err;
}
return 0;
@ -159,74 +121,15 @@ err:
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
int len, ret;
int ret;
if (master->unique != NULL)
drm_unset_busid(dev, master);
if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
master->unique_len = 10 + strlen(dev->platformdev->name);
master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
"platform:%s", dev->platformdev->name);
if (len > master->unique_len) {
DRM_ERROR("Unique buffer overflowed\n");
ret = -EINVAL;
goto err;
}
dev->devname =
kmalloc(strlen(dev->platformdev->name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", dev->platformdev->name,
master->unique);
} else {
master->unique_len = 40;
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
"pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
if (len >= master->unique_len) {
DRM_ERROR("buffer overflow");
ret = -EINVAL;
goto err;
} else
master->unique_len = len;
dev->devname =
kmalloc(strlen(dev->driver->pci_driver.name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
master->unique);
}
ret = dev->driver->bus->set_busid(dev, master);
if (ret)
goto err;
return 0;
err:
drm_unset_busid(dev, master);
return ret;
@ -364,6 +267,25 @@ int drm_getstats(struct drm_device *dev, void *data,
return 0;
}
/**
* Get device/driver capabilities
*/
int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_get_cap *req = data;
req->value = 0;
switch (req->capability) {
case DRM_CAP_DUMB_BUFFER:
if (dev->driver->dumb_create)
req->value = 1;
break;
default:
return -EINVAL;
}
return 0;
}
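As a sketch of the intended usage (not part of the series; the file descriptor and helper name are assumptions), userspace can probe this capability before relying on the dumb-buffer ioctls:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Hypothetical helper: returns 1 if the driver implements ->dumb_create. */
static int has_dumb_buffers(int fd)
{
	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };

	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap))
		return 0;	/* older kernel: the ioctl itself is unknown */
	return cap.value == 1;
}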
/**
* Setversion ioctl.
*


@ -74,23 +74,13 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
{
struct drm_irq_busid *p = data;
if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
if (!dev->driver->bus->irq_by_busid)
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != dev->pdev->bus->number ||
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
return 0;
return dev->driver->bus->irq_by_busid(dev, p);
}
/*


@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
else {
child =
list_entry(mm->unused_nodes.next,
struct drm_mm_node, free_stack);
list_del(&child->free_stack);
struct drm_mm_node, node_list);
list_del(&child->node_list);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
@ -94,195 +94,242 @@ int drm_mm_pre_get(struct drm_mm *mm)
return ret;
}
++mm->num_unused;
list_add_tail(&node->free_stack, &mm->unused_nodes);
list_add_tail(&node->node_list, &mm->unused_nodes);
}
spin_unlock(&mm->unused_lock);
return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
static int drm_mm_create_tail_node(struct drm_mm *mm,
unsigned long start,
unsigned long size, int atomic)
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
struct drm_mm_node *child;
child = drm_mm_kmalloc(mm, atomic);
if (unlikely(child == NULL))
return -ENOMEM;
child->free = 1;
child->size = size;
child->start = start;
child->mm = mm;
list_add_tail(&child->node_list, &mm->node_list);
list_add_tail(&child->free_stack, &mm->free_stack);
return 0;
return hole_node->start + hole_node->size;
}
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
unsigned long size,
int atomic)
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
struct drm_mm_node *child;
struct drm_mm_node *next_node =
list_entry(hole_node->node_list.next, struct drm_mm_node,
node_list);
child = drm_mm_kmalloc(parent->mm, atomic);
if (unlikely(child == NULL))
return NULL;
INIT_LIST_HEAD(&child->free_stack);
child->size = size;
child->start = parent->start;
child->mm = parent->mm;
list_add_tail(&child->node_list, &parent->node_list);
INIT_LIST_HEAD(&child->free_stack);
parent->size -= size;
parent->start += size;
return child;
return next_node->start;
}
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment)
{
struct drm_mm *mm = hole_node->mm;
unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
BUG_ON(!hole_node->hole_follows || node->allocated);
if (alignment)
tmp = hole_start % alignment;
if (!tmp) {
hole_node->hole_follows = 0;
list_del_init(&hole_node->hole_stack);
} else
wasted = alignment - tmp;
node->start = hole_start + wasted;
node->size = size;
node->mm = mm;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
BUG_ON(node->start + node->size > hole_end);
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
} else {
node->hole_follows = 0;
}
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
int atomic)
{
struct drm_mm_node *node;
struct drm_mm_node *align_splitoff = NULL;
unsigned tmp = 0;
node = drm_mm_kmalloc(hole_node->mm, atomic);
if (unlikely(node == NULL))
return NULL;
if (alignment)
tmp = node->start % alignment;
if (tmp) {
align_splitoff =
drm_mm_split_at_start(node, alignment - tmp, atomic);
if (unlikely(align_splitoff == NULL))
return NULL;
}
if (node->size == size) {
list_del_init(&node->free_stack);
node->free = 0;
} else {
node = drm_mm_split_at_start(node, size, atomic);
}
if (align_splitoff)
drm_mm_put_block(align_splitoff);
drm_mm_insert_helper(hole_node, node, size, alignment);
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. The preallocated memory node
* must be cleared.
*/
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free(mm, size, alignment, 0);
if (!hole_node)
return -ENOSPC;
drm_mm_insert_helper(hole_node, node, size, alignment);
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);
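As a rough illustration of the embedding this enables (the structure and names below are invented for the example), the caller now owns the node and only asks the manager to place it; because the node is preallocated, the insert path itself never allocates memory:

#include "drmP.h"

/* Hypothetical driver object with an embedded range-manager node. */
struct foo_buffer {
	struct drm_mm_node node;	/* must be zeroed before insertion */
	/* ... driver-private state ... */
};

static int foo_buffer_place(struct drm_mm *mm, struct foo_buffer *buf,
			    unsigned long size, unsigned alignment)
{
	int ret;

	memset(&buf->node, 0, sizeof(buf->node));
	ret = drm_mm_insert_node(mm, &buf->node, size, alignment);
	if (ret)		/* -ENOSPC: no hole large enough */
		return ret;

	/* buf->node.start now holds the assigned offset; on teardown the
	 * driver calls drm_mm_remove_node(&buf->node). */
	return 0;
}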
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long start, unsigned long end)
{
struct drm_mm *mm = hole_node->mm;
unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
BUG_ON(!hole_node->hole_follows || node->allocated);
if (hole_start < start)
wasted += start - hole_start;
if (alignment)
tmp = (hole_start + wasted) % alignment;
if (tmp)
wasted += alignment - tmp;
if (!wasted) {
hole_node->hole_follows = 0;
list_del_init(&hole_node->hole_stack);
}
node->start = hole_start + wasted;
node->size = size;
node->mm = mm;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
BUG_ON(node->start + node->size > hole_end);
BUG_ON(node->start + node->size > end);
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
} else {
node->hole_follows = 0;
}
}
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
int atomic)
{
struct drm_mm_node *align_splitoff = NULL;
unsigned tmp = 0;
unsigned wasted = 0;
struct drm_mm_node *node;
if (node->start < start)
wasted += start - node->start;
if (alignment)
tmp = ((node->start + wasted) % alignment);
node = drm_mm_kmalloc(hole_node->mm, atomic);
if (unlikely(node == NULL))
return NULL;
if (tmp)
wasted += alignment - tmp;
if (wasted) {
align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
if (unlikely(align_splitoff == NULL))
return NULL;
}
if (node->size == size) {
list_del_init(&node->free_stack);
node->free = 0;
} else {
node = drm_mm_split_at_start(node, size, atomic);
}
if (align_splitoff)
drm_mm_put_block(align_splitoff);
drm_mm_insert_helper_range(hole_node, node, size, alignment,
start, end);
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. This is for range
* restricted allocations. The preallocated memory node must be cleared.
*/
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long start, unsigned long end)
{
struct drm_mm_node *hole_node;
void drm_mm_put_block(struct drm_mm_node *cur)
hole_node = drm_mm_search_free_in_range(mm, size, alignment,
start, end, 0);
if (!hole_node)
return -ENOSPC;
drm_mm_insert_helper_range(hole_node, node, size, alignment,
start, end);
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
/**
* Remove a memory node from the allocator.
*/
void drm_mm_remove_node(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
BUG_ON(node->scanned_block || node->scanned_prev_free
|| node->scanned_next_free);
prev_node =
list_entry(node->node_list.prev, struct drm_mm_node, node_list);
if (node->hole_follows) {
BUG_ON(drm_mm_hole_node_start(node)
== drm_mm_hole_node_end(node));
list_del(&node->hole_stack);
} else
BUG_ON(drm_mm_hole_node_start(node)
!= drm_mm_hole_node_end(node));
if (!prev_node->hole_follows) {
prev_node->hole_follows = 1;
list_add(&prev_node->hole_stack, &mm->hole_stack);
} else
list_move(&prev_node->hole_stack, &mm->hole_stack);
list_del(&node->node_list);
node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
/*
* Remove a memory node from the allocator and free the allocated struct
* drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
* drm_mm_get_block functions.
*/
void drm_mm_put_block(struct drm_mm_node *node)
{
struct drm_mm *mm = cur->mm;
struct list_head *cur_head = &cur->node_list;
struct list_head *root_head = &mm->node_list;
struct drm_mm_node *prev_node = NULL;
struct drm_mm_node *next_node;
struct drm_mm *mm = node->mm;
int merged = 0;
drm_mm_remove_node(node);
BUG_ON(cur->scanned_block || cur->scanned_prev_free
|| cur->scanned_next_free);
if (cur_head->prev != root_head) {
prev_node =
list_entry(cur_head->prev, struct drm_mm_node, node_list);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
}
}
if (cur_head->next != root_head) {
next_node =
list_entry(cur_head->next, struct drm_mm_node, node_list);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
list_del(&next_node->node_list);
list_del(&next_node->free_stack);
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&next_node->free_stack,
&mm->unused_nodes);
++mm->num_unused;
} else
kfree(next_node);
spin_unlock(&mm->unused_lock);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
merged = 1;
}
}
}
if (!merged) {
cur->free = 1;
list_add(&cur->free_stack, &mm->free_stack);
} else {
list_del(&cur->node_list);
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&cur->free_stack, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(cur);
spin_unlock(&mm->unused_lock);
}
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&node->node_list, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(node);
spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_hole(unsigned long start, unsigned long end,
@ -319,8 +366,10 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
list_for_each_entry(entry, &mm->free_stack, free_stack) {
if (!check_free_hole(entry->start, entry->start + entry->size,
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
BUG_ON(!entry->hole_follows);
if (!check_free_hole(drm_mm_hole_node_start(entry),
drm_mm_hole_node_end(entry),
size, alignment))
continue;
@ -353,12 +402,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
list_for_each_entry(entry, &mm->free_stack, free_stack) {
unsigned long adj_start = entry->start < start ?
start : entry->start;
unsigned long adj_end = entry->start + entry->size > end ?
end : entry->start + entry->size;
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
start : drm_mm_hole_node_start(entry);
unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
end : drm_mm_hole_node_end(entry);
BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
@ -375,6 +425,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
list_replace(&old->node_list, &new->node_list);
list_replace(&old->hole_stack, &new->hole_stack);
new->hole_follows = old->hole_follows;
new->mm = old->mm;
new->start = old->start;
new->size = old->size;
old->allocated = 0;
new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
/**
* Initialize LRU scanning.
*
@ -393,6 +460,7 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_check_range = 0;
mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
@ -418,6 +486,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
@ -430,70 +499,42 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct list_head *prev_free, *next_free;
struct drm_mm_node *prev_node, *next_node;
struct drm_mm_node *prev_node;
unsigned long hole_start, hole_end;
unsigned long adj_start;
unsigned long adj_end;
mm->scanned_blocks++;
prev_free = next_free = NULL;
BUG_ON(node->free);
BUG_ON(node->scanned_block);
node->scanned_block = 1;
node->free = 1;
if (node->node_list.prev != &mm->node_list) {
prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
node_list);
prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
node_list);
if (prev_node->free) {
list_del(&prev_node->node_list);
node->start = prev_node->start;
node->size += prev_node->size;
prev_node->scanned_prev_free = 1;
prev_free = &prev_node->free_stack;
}
}
if (node->node_list.next != &mm->node_list) {
next_node = list_entry(node->node_list.next, struct drm_mm_node,
node_list);
if (next_node->free) {
list_del(&next_node->node_list);
node->size += next_node->size;
next_node->scanned_next_free = 1;
next_free = &next_node->free_stack;
}
}
/* The free_stack list is not used for allocated objects, so these two
* pointers can be abused (as long as no allocations in this memory
* manager happens). */
node->free_stack.prev = prev_free;
node->free_stack.next = next_free;
node->scanned_preceeds_hole = prev_node->hole_follows;
prev_node->hole_follows = 1;
list_del(&node->node_list);
node->node_list.prev = &prev_node->node_list;
node->node_list.next = &mm->prev_scanned_node->node_list;
mm->prev_scanned_node = node;
hole_start = drm_mm_hole_node_start(prev_node);
hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
adj_start = node->start < mm->scan_start ?
mm->scan_start : node->start;
adj_end = node->start + node->size > mm->scan_end ?
mm->scan_end : node->start + node->size;
adj_start = hole_start < mm->scan_start ?
mm->scan_start : hole_start;
adj_end = hole_end > mm->scan_end ?
mm->scan_end : hole_end;
} else {
adj_start = node->start;
adj_end = node->start + node->size;
adj_start = hole_start;
adj_end = hole_end;
}
if (check_free_hole(adj_start , adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = node->start;
mm->scan_hit_size = node->size;
mm->scan_hit_start = hole_start;
mm->scan_hit_size = hole_end;
return 1;
}
@ -519,39 +560,19 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node, *next_node;
struct drm_mm_node *prev_node;
mm->scanned_blocks--;
BUG_ON(!node->scanned_block);
node->scanned_block = 0;
node->free = 0;
prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
free_stack);
next_node = list_entry(node->free_stack.next, struct drm_mm_node,
free_stack);
prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
node_list);
if (prev_node) {
BUG_ON(!prev_node->scanned_prev_free);
prev_node->scanned_prev_free = 0;
list_add_tail(&prev_node->node_list, &node->node_list);
node->start = prev_node->start + prev_node->size;
node->size -= prev_node->size;
}
if (next_node) {
BUG_ON(!next_node->scanned_next_free);
next_node->scanned_next_free = 0;
list_add(&next_node->node_list, &node->node_list);
node->size -= next_node->size;
}
INIT_LIST_HEAD(&node->free_stack);
prev_node->hole_follows = node->scanned_preceeds_hole;
INIT_LIST_HEAD(&node->node_list);
list_add(&node->node_list, &prev_node->node_list);
/* Only need to check for containment because start&size for the
* complete resulting free block (not just the desired part) is
@ -568,7 +589,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block);
int drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->node_list;
struct list_head *head = &mm->head_node.node_list;
return (head->next->next == head);
}
@ -576,38 +597,40 @@ EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->node_list);
INIT_LIST_HEAD(&mm->free_stack);
INIT_LIST_HEAD(&mm->hole_stack);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
mm->scanned_blocks = 0;
spin_lock_init(&mm->unused_lock);
return drm_mm_create_tail_node(mm, start, size, 0);
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
INIT_LIST_HEAD(&mm->head_node.hole_stack);
mm->head_node.hole_follows = 1;
mm->head_node.scanned_block = 0;
mm->head_node.scanned_prev_free = 0;
mm->head_node.scanned_next_free = 0;
mm->head_node.mm = mm;
mm->head_node.start = start + size;
mm->head_node.size = start - mm->head_node.start;
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
return 0;
}
EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
struct list_head *bnode = mm->free_stack.next;
struct drm_mm_node *entry;
struct drm_mm_node *next;
struct drm_mm_node *entry, *next;
entry = list_entry(bnode, struct drm_mm_node, free_stack);
if (entry->node_list.next != &mm->node_list ||
entry->free_stack.next != &mm->free_stack) {
if (!list_empty(&mm->head_node.node_list)) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
list_del(&entry->free_stack);
list_del(&entry->node_list);
kfree(entry);
spin_lock(&mm->unused_lock);
list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
list_del(&entry->free_stack);
list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
list_del(&entry->node_list);
kfree(entry);
--mm->num_unused;
}
@ -620,19 +643,37 @@ EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
int total_used = 0, total_free = 0, total = 0;
unsigned long total_used = 0, total_free = 0, total = 0;
unsigned long hole_start, hole_end, hole_size;
list_for_each_entry(entry, &mm->node_list, node_list) {
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
hole_start = drm_mm_hole_node_start(&mm->head_node);
hole_end = drm_mm_hole_node_end(&mm->head_node);
hole_size = hole_end - hole_start;
if (hole_size)
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
prefix, hole_start, hole_end,
hole_size);
total_free += hole_size;
drm_mm_for_each_node(entry, mm) {
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
prefix, entry->start, entry->start + entry->size,
entry->size, entry->free ? "free" : "used");
total += entry->size;
if (entry->free)
total_free += entry->size;
else
total_used += entry->size;
entry->size);
total_used += entry->size;
if (entry->hole_follows) {
hole_start = drm_mm_hole_node_start(entry);
hole_end = drm_mm_hole_node_end(entry);
hole_size = hole_end - hole_start;
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
prefix, hole_start, hole_end,
hole_size);
total_free += hole_size;
}
}
printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
total = total_free + total_used;
printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
@ -641,17 +682,34 @@ EXPORT_SYMBOL(drm_mm_debug_table);
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
struct drm_mm_node *entry;
int total_used = 0, total_free = 0, total = 0;
unsigned long total_used = 0, total_free = 0, total = 0;
unsigned long hole_start, hole_end, hole_size;
list_for_each_entry(entry, &mm->node_list, node_list) {
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
total += entry->size;
if (entry->free)
total_free += entry->size;
else
total_used += entry->size;
hole_start = drm_mm_hole_node_start(&mm->head_node);
hole_end = drm_mm_hole_node_end(&mm->head_node);
hole_size = hole_end - hole_start;
if (hole_size)
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
hole_start, hole_end, hole_size);
total_free += hole_size;
drm_mm_for_each_node(entry, mm) {
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
entry->start, entry->start + entry->size,
entry->size);
total_used += entry->size;
if (entry->hole_follows) {
hole_start = drm_mm_hole_node_start(entry);
hole_end = drm_mm_hole_node_end(entry);
hole_size = hole_end - hole_start;
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
hole_start, hole_end, hole_size);
total_free += hole_size;
}
}
seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
total = total_free + total_used;
seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);


@ -593,7 +593,7 @@ EXPORT_SYMBOL(drm_mode_height);
*
* Return @mode's hsync rate in kHz, rounded to the nearest int.
*/
int drm_mode_hsync(struct drm_display_mode *mode)
int drm_mode_hsync(const struct drm_display_mode *mode)
{
unsigned int calc_val;
@ -627,7 +627,7 @@ EXPORT_SYMBOL(drm_mode_hsync);
* If it is 70.288, it will return 70Hz.
* If it is 59.6, it will return 60Hz.
*/
int drm_mode_vrefresh(struct drm_display_mode *mode)
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
int refresh = 0;
unsigned int calc_val;
@ -725,7 +725,7 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
* a pointer to it. Used to create new instances of established modes.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;


@ -125,6 +125,176 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
EXPORT_SYMBOL(drm_pci_free);
#ifdef CONFIG_PCI
static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
/* For historical reasons, drm_get_pci_domain() is busticated
* on most archs and has to remain so for userspace interface
* < 1.4, except on alpha which was right from the beginning
*/
if (dev->if_version < 0x10004)
return 0;
#endif /* __alpha__ */
return pci_domain_nr(dev->pdev->bus);
}
static int drm_pci_get_irq(struct drm_device *dev)
{
return dev->pdev->irq;
}
static const char *drm_pci_get_name(struct drm_device *dev)
{
struct pci_driver *pdriver = dev->driver->kdriver.pci;
return pdriver->name;
}
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
struct pci_driver *pdriver = dev->driver->kdriver.pci;
master->unique_len = 40;
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
"pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
if (len >= master->unique_len) {
DRM_ERROR("buffer overflow");
ret = -EINVAL;
goto err;
} else
master->unique_len = len;
dev->devname =
kmalloc(strlen(pdriver->name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", pdriver->name,
master->unique);
return 0;
err:
return ret;
}
int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
struct drm_unique *u)
{
int domain, bus, slot, func, ret;
const char *bus_name;
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (!master->unique) {
ret = -ENOMEM;
goto err;
}
if (copy_from_user(master->unique, u->unique, master->unique_len)) {
ret = -EFAULT;
goto err;
}
master->unique[master->unique_len] = '\0';
bus_name = dev->driver->bus->get_name(dev);
dev->devname = kmalloc(strlen(bus_name) +
strlen(master->unique) + 2, GFP_KERNEL);
if (!dev->devname) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", bus_name,
master->unique);
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3) {
ret = -EINVAL;
goto err;
}
domain = bus >> 8;
bus &= 0xff;
if ((domain != drm_get_pci_domain(dev)) ||
(bus != dev->pdev->bus->number) ||
(slot != PCI_SLOT(dev->pdev->devfn)) ||
(func != PCI_FUNC(dev->pdev->devfn))) {
ret = -EINVAL;
goto err;
}
return 0;
err:
return ret;
}
int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != dev->pdev->bus->number ||
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
return 0;
}
int drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_has_AGP(dev)) {
if (drm_pci_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
&& (dev->agp == NULL)) {
DRM_ERROR("Cannot initialize the agpgart module.\n");
return -EINVAL;
}
if (drm_core_has_MTRR(dev)) {
if (dev->agp)
dev->agp->agp_mtrr =
mtrr_add(dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size *
1024 * 1024, MTRR_TYPE_WRCOMB, 1);
}
}
return 0;
}
static struct drm_bus drm_pci_bus = {
.bus_type = DRIVER_BUS_PCI,
.get_irq = drm_pci_get_irq,
.get_name = drm_pci_get_name,
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
.agp_init = drm_pci_agp_init,
};
/**
* Register.
*
@ -219,7 +389,7 @@ err_g1:
EXPORT_SYMBOL(drm_get_pci_dev);
/**
* PCI device initialization. Called via drm_init at module load time,
* PCI device initialization. Called direct from modules at load time.
*
* \return zero on success or a negative number on failure.
*
@ -229,18 +399,24 @@ EXPORT_SYMBOL(drm_get_pci_dev);
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
int drm_pci_init(struct drm_driver *driver)
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
int i;
DRM_DEBUG("\n");
INIT_LIST_HEAD(&driver->device_list);
driver->kdriver.pci = pdriver;
driver->bus = &drm_pci_bus;
if (driver->driver_features & DRIVER_MODESET)
return pci_register_driver(&driver->pci_driver);
return pci_register_driver(pdriver);
/* If not using KMS, fall back to stealth mode manual scanning. */
for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
pid = &driver->pci_driver.id_table[i];
for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
pid = &pdriver->id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
@ -265,10 +441,27 @@ int drm_pci_init(struct drm_driver *driver)
#else
int drm_pci_init(struct drm_driver *driver)
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
return -1;
}
#endif
EXPORT_SYMBOL(drm_pci_init);
/*@}*/
void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
if (driver->driver_features & DRIVER_MODESET) {
pci_unregister_driver(pdriver);
} else {
list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
drm_put_dev(dev);
}
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_pci_exit);


@ -109,8 +109,60 @@ err_g1:
}
EXPORT_SYMBOL(drm_get_platform_dev);
static int drm_platform_get_irq(struct drm_device *dev)
{
return platform_get_irq(dev->platformdev, 0);
}
static const char *drm_platform_get_name(struct drm_device *dev)
{
return dev->platformdev->name;
}
static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
master->unique_len = 10 + strlen(dev->platformdev->name);
master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
"platform:%s", dev->platformdev->name);
if (len > master->unique_len) {
DRM_ERROR("Unique buffer overflowed\n");
ret = -EINVAL;
goto err;
}
dev->devname =
kmalloc(strlen(dev->platformdev->name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", dev->platformdev->name,
master->unique);
return 0;
err:
return ret;
}
static struct drm_bus drm_platform_bus = {
.bus_type = DRIVER_BUS_PLATFORM,
.get_irq = drm_platform_get_irq,
.get_name = drm_platform_get_name,
.set_busid = drm_platform_set_busid,
};
/**
* Platform device initialization. Called via drm_init at module load time,
* Platform device initialization. Called direct from modules.
*
* \return zero on success or a negative number on failure.
*
@ -121,7 +173,24 @@ EXPORT_SYMBOL(drm_get_platform_dev);
* after the initialization for driver customization.
*/
int drm_platform_init(struct drm_driver *driver)
int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
{
return drm_get_platform_dev(driver->platform_device, driver);
DRM_DEBUG("\n");
driver->kdriver.platform_device = platform_device;
driver->bus = &drm_platform_bus;
INIT_LIST_HEAD(&driver->device_list);
return drm_get_platform_dev(platform_device, driver);
}
EXPORT_SYMBOL(drm_platform_init);
void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
drm_put_dev(dev);
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_platform_exit);
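Together with the removal of drm_init()/drm_exit() shown earlier, a platform-based driver now registers itself directly from its own bus callbacks; a minimal sketch under assumed names:

/* Hypothetical platform DRM driver using the new entry points. */
static struct drm_driver foo_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	/* .fops, .load, .name, ... elided */
};

static int foo_probe(struct platform_device *pdev)
{
	return drm_platform_init(&foo_drm_driver, pdev);
}

static int foo_remove(struct platform_device *pdev)
{
	drm_platform_exit(&foo_drm_driver, pdev);
	return 0;
}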


@ -269,25 +269,14 @@ int drm_fill_in_dev(struct drm_device *dev,
dev->driver = driver;
if (drm_core_has_AGP(dev)) {
if (drm_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
&& (dev->agp == NULL)) {
DRM_ERROR("Cannot initialize the agpgart module.\n");
retcode = -EINVAL;
if (dev->driver->bus->agp_init) {
retcode = dev->driver->bus->agp_init(dev);
if (retcode)
goto error_out_unreg;
}
if (drm_core_has_MTRR(dev)) {
if (dev->agp)
dev->agp->agp_mtrr =
mtrr_add(dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size *
1024 * 1024, MTRR_TYPE_WRCOMB, 1);
}
}
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
@ -425,7 +414,6 @@ int drm_put_minor(struct drm_minor **minor_p)
*
* Cleans up all DRM device, calling drm_lastclose().
*
* \sa drm_init
*/
void drm_put_dev(struct drm_device *dev)
{
@ -475,6 +463,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
list_del(&dev->driver_item);
if (dev->devname) {
kfree(dev->devname);
dev->devname = NULL;


@ -158,8 +158,15 @@ static ssize_t status_show(struct device *device,
{
struct drm_connector *connector = to_drm_connector(device);
enum drm_connector_status status;
int ret;
ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex);
if (ret)
return ret;
status = connector->funcs->detect(connector, true);
mutex_unlock(&connector->dev->mode_config.mutex);
return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
}

drivers/gpu/drm/drm_usb.c (new file, 117 lines)

@ -0,0 +1,117 @@
#include "drmP.h"
#include <linux/usb.h>
#ifdef CONFIG_USB
int drm_get_usb_dev(struct usb_interface *interface,
const struct usb_device_id *id,
struct drm_driver *driver)
{
struct drm_device *dev;
struct usb_device *usbdev;
int ret;
DRM_DEBUG("\n");
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
usbdev = interface_to_usbdev(interface);
dev->usbdev = usbdev;
dev->dev = &usbdev->dev;
mutex_lock(&drm_global_mutex);
ret = drm_fill_in_dev(dev, NULL, driver);
if (ret) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g1;
}
usb_set_intfdata(interface, dev);
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
goto err_g1;
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
if (ret)
goto err_g2;
if (dev->driver->load) {
ret = dev->driver->load(dev, 0);
if (ret)
goto err_g3;
}
/* setup the grouping for the legacy output */
ret = drm_mode_group_init_legacy_group(dev,
&dev->primary->mode_group);
if (ret)
goto err_g3;
list_add_tail(&dev->driver_item, &driver->device_list);
mutex_unlock(&drm_global_mutex);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, dev->primary->index);
return 0;
err_g3:
drm_put_minor(&dev->primary);
err_g2:
drm_put_minor(&dev->control);
err_g1:
kfree(dev);
mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_get_usb_dev);
static int drm_usb_get_irq(struct drm_device *dev)
{
return 0;
}
static const char *drm_usb_get_name(struct drm_device *dev)
{
return "USB";
}
static int drm_usb_set_busid(struct drm_device *dev,
struct drm_master *master)
{
return 0;
}
static struct drm_bus drm_usb_bus = {
.bus_type = DRIVER_BUS_USB,
.get_irq = drm_usb_get_irq,
.get_name = drm_usb_get_name,
.set_busid = drm_usb_set_busid,
};
int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
{
int res;
DRM_DEBUG("\n");
INIT_LIST_HEAD(&driver->device_list);
driver->kdriver.usb = udriver;
driver->bus = &drm_usb_bus;
res = usb_register(udriver);
return res;
}
EXPORT_SYMBOL(drm_usb_init);
void drm_usb_exit(struct drm_driver *driver,
struct usb_driver *udriver)
{
usb_deregister(udriver);
}
EXPORT_SYMBOL(drm_usb_exit);
#endif
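A USB display driver would hook into these helpers roughly as follows (a sketch only; the foo_* names, the disconnect handler and the id table are assumed to exist elsewhere):

/* Hypothetical USB DRM driver built on drm_usb_init()/drm_get_usb_dev(). */
static int foo_usb_probe(struct usb_interface *interface,
			 const struct usb_device_id *id)
{
	return drm_get_usb_dev(interface, id, &foo_drm_driver);
}

static struct usb_driver foo_usb_driver = {
	.name		= "foo-drm",
	.probe		= foo_usb_probe,
	.disconnect	= foo_usb_disconnect,
	.id_table	= foo_usb_ids,
};

static int __init foo_init(void)
{
	return drm_usb_init(&foo_drm_driver, &foo_usb_driver);
}

static void __exit foo_exit(void)
{
	drm_usb_exit(&foo_drm_driver, &foo_usb_driver);
}

module_init(foo_init);
module_exit(foo_exit);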


@ -37,7 +37,6 @@
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#define I810_BUF_FREE 2
@ -94,7 +93,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
lock_kernel();
dev = priv->minor->dev;
dev_priv = dev->dev_private;
buf = dev_priv->mmap_buffer;
@ -104,7 +102,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
vma->vm_file = filp;
buf_priv->currently_mapped = I810_BUF_MAPPED;
unlock_kernel();
if (io_remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff,
@ -116,7 +113,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = i810_ioctl,
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
.llseek = noop_llseek,
@ -1242,19 +1239,6 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
return 0;
}
/*
* call the drm_ioctl under the big kernel lock because
* to lock against the i810_mmap_buffers function.
*/
long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret;
lock_kernel();
ret = drm_ioctl(file, cmd, arg);
unlock_kernel();
return ret;
}
struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),


@ -57,18 +57,13 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = i810_ioctl,
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.llseek = noop_llseek,
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
},
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@ -77,15 +72,24 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver i810_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
};
static int __init i810_init(void)
{
if (num_possible_cpus() > 1) {
pr_err("drm/i810 does not support SMP\n");
return -EINVAL;
}
driver.num_ioctls = i810_max_ioctl;
return drm_init(&driver);
return drm_pci_init(&driver, &i810_pci_driver);
}
static void __exit i810_exit(void)
{
drm_exit(&driver);
drm_pci_exit(&driver, &i810_pci_driver);
}
module_init(i810_init);


@ -1,8 +0,0 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
i830-y := i830_drv.o i830_dma.o i830_irq.o
obj-$(CONFIG_DRM_I830) += i830.o

File diff suppressed because it is too large.


@ -1,107 +0,0 @@
/* i830_drv.c -- I810 driver -*- linux-c -*-
* Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* Abraham vd Merwe <abraham@2d3d.co.za>
* Keith Whitwell <keith@tungstengraphics.com>
*/
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"
#include "i830_drv.h"
#include "drm_pciids.h"
static struct pci_device_id pciidlist[] = {
i830_PCI_IDS
};
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
#if USE_IRQS
.driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
#endif
.dev_priv_size = sizeof(drm_i830_buf_priv_t),
.load = i830_driver_load,
.lastclose = i830_driver_lastclose,
.preclose = i830_driver_preclose,
.device_is_agp = i830_driver_device_is_agp,
.reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
.dma_quiescent = i830_driver_dma_quiescent,
#if USE_IRQS
.irq_preinstall = i830_driver_irq_preinstall,
.irq_postinstall = i830_driver_irq_postinstall,
.irq_uninstall = i830_driver_irq_uninstall,
.irq_handler = i830_driver_irq_handler,
#endif
.ioctls = i830_ioctls,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = i830_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.llseek = noop_llseek,
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
},
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
static int __init i830_init(void)
{
driver.num_ioctls = i830_max_ioctl;
return drm_init(&driver);
}
static void __exit i830_exit(void)
{
drm_exit(&driver);
}
module_init(i830_init);
module_exit(i830_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");


@ -1,295 +0,0 @@
/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*/
#ifndef _I830_DRV_H_
#define _I830_DRV_H_
/* General customization:
*/
#define DRIVER_AUTHOR "VA Linux Systems Inc."
#define DRIVER_NAME "i830"
#define DRIVER_DESC "Intel 830M"
#define DRIVER_DATE "20021108"
/* Interface history:
*
* 1.1: Original.
* 1.2: ?
* 1.3: New irq emit/wait ioctls.
* New pageflip ioctl.
* New getparam ioctl.
* State for texunits 3&4 in sarea.
* New (alternative) layout for texture state.
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 3
#define DRIVER_PATCHLEVEL 2
/* Driver will work either way: IRQs save CPU time when waiting for
* the card, but are subject to subtle interactions between BIOS,
* hardware and the driver.
*/
/* XXX: Add vblank support? */
#define USE_IRQS 0
typedef struct drm_i830_buf_priv {
u32 *in_use;
int my_use_idx;
int currently_mapped;
void __user *virtual;
void *kernel_virtual;
drm_local_map_t map;
} drm_i830_buf_priv_t;
typedef struct _drm_i830_ring_buffer {
int tail_mask;
unsigned long Start;
unsigned long End;
unsigned long Size;
u8 *virtual_start;
int head;
int tail;
int space;
drm_local_map_t map;
} drm_i830_ring_buffer_t;
typedef struct drm_i830_private {
struct drm_local_map *sarea_map;
struct drm_local_map *mmio_map;
drm_i830_sarea_t *sarea_priv;
drm_i830_ring_buffer_t ring;
void *hw_status_page;
unsigned long counter;
dma_addr_t dma_status_page;
struct drm_buf *mmap_buffer;
u32 front_di1, back_di1, zi1;
int back_offset;
int depth_offset;
int front_offset;
int w, h;
int pitch;
int back_pitch;
int depth_pitch;
unsigned int cpp;
int do_boxes;
int dma_used;
int current_page;
int page_flipping;
wait_queue_head_t irq_queue;
atomic_t irq_received;
atomic_t irq_emitted;
int use_mi_batchbuffer_start;
} drm_i830_private_t;
long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
extern struct drm_ioctl_desc i830_ioctls[];
extern int i830_max_ioctl;
/* i830_irq.c */
extern int i830_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i830_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
extern void i830_driver_irq_preinstall(struct drm_device *dev);
extern void i830_driver_irq_postinstall(struct drm_device *dev);
extern void i830_driver_irq_uninstall(struct drm_device *dev);
extern int i830_driver_load(struct drm_device *, unsigned long flags);
extern void i830_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
extern void i830_driver_lastclose(struct drm_device *dev);
extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
extern int i830_driver_dma_quiescent(struct drm_device *dev);
extern int i830_driver_device_is_agp(struct drm_device *dev);
#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
#define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
#define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
#define I830_VERBOSE 0
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt;
#define BEGIN_LP_RING(n) do { \
if (I830_VERBOSE) \
printk("BEGIN_LP_RING(%d)\n", (n)); \
if (dev_priv->ring.space < n*4) \
i830_wait_ring(dev, n*4, __func__); \
outcount = 0; \
outring = dev_priv->ring.tail; \
ringmask = dev_priv->ring.tail_mask; \
virt = dev_priv->ring.virtual_start; \
} while (0)
#define OUT_RING(n) do { \
if (I830_VERBOSE) \
printk(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
outcount++; \
outring += 4; \
outring &= ringmask; \
} while (0)
#define ADVANCE_LP_RING() do { \
if (I830_VERBOSE) \
printk("ADVANCE_LP_RING %x\n", outring); \
dev_priv->ring.tail = outring; \
dev_priv->ring.space -= outcount * 4; \
I830_WRITE(LP_RING + RING_TAIL, outring); \
} while (0)
extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
#define LOAD_TEXTURE_MAP0 (1<<11)
#define INST_PARSER_CLIENT 0x00000000
#define INST_OP_FLUSH 0x02000000
#define INST_FLUSH_MAP_CACHE 0x00000001
#define BB1_START_ADDR_MASK (~0x7)
#define BB1_PROTECTED (1<<0)
#define BB1_UNPROTECTED (0<<0)
#define BB2_END_ADDR_MASK (~0x7)
#define I830REG_HWSTAM 0x02098
#define I830REG_INT_IDENTITY_R 0x020a4
#define I830REG_INT_MASK_R 0x020a8
#define I830REG_INT_ENABLE_R 0x020a0
#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
#define LP_RING 0x2030
#define HP_RING 0x2040
#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD 0x04
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
#define START_ADDR 0xFFFFF000
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x001FF000
#define RING_REPORT_MASK 0x00000006
#define RING_REPORT_64K 0x00000002
#define RING_REPORT_128K 0x00000004
#define RING_NO_REPORT 0x00000000
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0)
#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0)
#define SCI_YMAX_MASK (0xffff<<16)
#define SCI_XMAX_MASK (0xffff<<0)
#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
#define ASYNC_FLIP (1<<22)
#define CMD_3D (0x3<<29)
#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16))
#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
#define BR00_BITBLT_CLIENT 0x40000000
#define BR00_OP_COLOR_BLT 0x10000000
#define BR00_OP_SRC_COPY_BLT 0x10C00000
#define BR13_SOLID_PATTERN 0x80000000
#define BUF_3D_ID_COLOR_BACK (0x3<<24)
#define BUF_3D_ID_DEPTH (0x7<<24)
#define BUF_3D_USE_FENCE (1<<23)
#define BUF_3D_PITCH(x) (((x)/4)<<2)
#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
#define MAP_PALETTE_BOTH (1<<11)
#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
#define XY_COLOR_BLT_WRITE_RGB (1<<20)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
#define MI_BATCH_BUFFER ((0x30<<23)|1)
#define MI_BATCH_BUFFER_START (0x31<<23)
#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1)
#define MI_WAIT_FOR_EVENT ((0x3<<23))
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
#endif


@ -1,186 +0,0 @@
/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
*
* Copyright 2002 Tungsten Graphics, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Keith Whitwell <keith@tungstengraphics.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"
#include "i830_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
u16 temp;
temp = I830_READ16(I830REG_INT_IDENTITY_R);
DRM_DEBUG("%x\n", temp);
if (!(temp & 2))
return IRQ_NONE;
I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
atomic_inc(&dev_priv->irq_received);
wake_up_interruptible(&dev_priv->irq_queue);
return IRQ_HANDLED;
}
static int i830_emit_irq(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
DRM_DEBUG("%s\n", __func__);
atomic_inc(&dev_priv->irq_emitted);
BEGIN_LP_RING(2);
OUT_RING(0);
OUT_RING(GFX_OP_USER_INTERRUPT);
ADVANCE_LP_RING();
return atomic_read(&dev_priv->irq_emitted);
}
static int i830_wait_irq(struct drm_device *dev, int irq_nr)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
DECLARE_WAITQUEUE(entry, current);
unsigned long end = jiffies + HZ * 3;
int ret = 0;
DRM_DEBUG("%s\n", __func__);
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
return 0;
dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
add_wait_queue(&dev_priv->irq_queue, &entry);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
break;
if ((signed)(end - jiffies) <= 0) {
DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
I830_READ16(I830REG_INT_IDENTITY_R),
I830_READ16(I830REG_INT_MASK_R),
I830_READ16(I830REG_INT_ENABLE_R),
I830_READ16(I830REG_HWSTAM));
ret = -EBUSY; /* Lockup? Missed irq? */
break;
}
schedule_timeout(HZ * 3);
if (signal_pending(current)) {
ret = -EINTR;
break;
}
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev_priv->irq_queue, &entry);
return ret;
}
/* Needs the lock as it touches the ring.
*/
int i830_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_emit_t *emit = data;
int result;
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __func__);
return -EINVAL;
}
result = i830_emit_irq(dev);
if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
return 0;
}
/* Doesn't need the hardware lock.
*/
int i830_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_wait_t *irqwait = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __func__);
return -EINVAL;
}
return i830_wait_irq(dev, irqwait->irq_seq);
}
/* drm_dma.h hooks
*/
void i830_driver_irq_preinstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
I830_WRITE16(I830REG_HWSTAM, 0xffff);
I830_WRITE16(I830REG_INT_MASK_R, 0x0);
I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
atomic_set(&dev_priv->irq_received, 0);
atomic_set(&dev_priv->irq_emitted, 0);
init_waitqueue_head(&dev_priv->irq_queue);
}
void i830_driver_irq_postinstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
}
void i830_driver_irq_uninstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
if (!dev_priv)
return;
I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
}


@ -326,21 +326,21 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct intel_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
const char *pipe = crtc->pipe ? "B" : "A";
const char *plane = crtc->plane ? "B" : "A";
const char pipe = pipe_name(crtc->pipe);
const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;
spin_lock_irqsave(&dev->event_lock, flags);
work = crtc->unpin_work;
if (work == NULL) {
seq_printf(m, "No flip due on pipe %s (plane %s)\n",
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
if (!work->pending) {
seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
pipe, plane);
}
if (work->enable_stall_check)
@ -458,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@ -471,10 +471,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(IIR));
seq_printf(m, "Interrupt mask: %08x\n",
I915_READ(IMR));
seq_printf(m, "Pipe A stat: %08x\n",
I915_READ(PIPEASTAT));
seq_printf(m, "Pipe B stat: %08x\n",
I915_READ(PIPEBSTAT));
for_each_pipe(pipe)
seq_printf(m, "Pipe %c stat: %08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
} else {
seq_printf(m, "North Display Interrupt enable: %08x\n",
I915_READ(DEIER));
@ -544,11 +544,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
volatile u32 *hws;
const volatile u32 __iomem *hws;
int i;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
hws = (volatile u32 *)ring->status_page.page_addr;
hws = (volatile u32 __iomem *)ring->status_page.page_addr;
if (hws == NULL)
return 0;
@ -615,7 +615,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
if (!ring->obj) {
seq_printf(m, "No ringbuffer setup\n");
} else {
u8 *virt = ring->virtual_start;
const u8 __iomem *virt = ring->virtual_start;
uint32_t off;
for (off = 0; off < ring->size; off += 4) {
@ -805,15 +805,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
if (error->ringbuffer) {
struct drm_i915_error_object *obj = error->ringbuffer;
seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
offset += 4;
for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
if (error->ringbuffer[i]) {
struct drm_i915_error_object *obj = error->ringbuffer[i];
seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
seq_printf(m, "%08x : %08x\n",
offset,
obj->pages[page][elt]);
offset += 4;
}
}
}
}
@ -862,19 +867,44 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 rpstat;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
/* RPSTAT1 is in the GT power well */
__gen6_gt_force_wake_get(dev_priv);
rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
rpcurup = I915_READ(GEN6_RP_CUR_UP);
rpprevup = I915_READ(GEN6_RP_PREV_UP);
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & 0xff00) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
GEN6_CAGF_SHIFT) * 100);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
GEN6_CURIAVG_MASK);
seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
GEN6_CURBSYTAVG_MASK);
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
@ -1259,7 +1289,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0, 0},
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},


@ -43,6 +43,17 @@
#include <linux/slab.h>
#include <acpi/video.h>
static void i915_write_hws_pga(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
if (INTEL_INFO(dev)->gen >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
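A rough worked example of the gen4+ encoding above (the DMA address is hypothetical, not taken from this patch):
/*
 * Example: busaddr = 0x1_2345_6000. The low 32 bits give addr = 0x23456000;
 * (busaddr >> 28) & 0xf0 = 0x10, i.e. address bits 32-35 are folded into
 * bits 4-7 of the page-aligned value, so HWS_PGA is written with 0x23456010.
 */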
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
@ -60,16 +71,13 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
ring->status_page.page_addr =
(void __force __iomem *)dev_priv->status_page_dmah->vaddr;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
0xf0;
i915_write_hws_pga(dev);
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
@ -216,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev)
if (ring->status_page.gfx_addr != 0)
intel_ring_setup_status_page(ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
i915_write_hws_pga(dev);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
@ -771,6 +779,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_INFO(dev)->gen >= 4;
break;
case I915_PARAM_HAS_RELAXED_DELTA:
value = 1;
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@ -859,8 +870,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
" G33 hw status page\n");
return -ENOMEM;
}
ring->status_page.page_addr = dev_priv->hws_map.handle;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
ring->status_page.page_addr =
(void __force __iomem *)dev_priv->hws_map.handle;
memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@ -2013,9 +2025,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
else
dev_priv->num_pipe = 1;
ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
goto out_gem_unload;


@ -43,10 +43,13 @@ module_param_named(modeset, i915_modeset, int, 0400);
unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
int i915_panel_ignore_lid = 0;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
unsigned int i915_semaphores = 0;
unsigned int i915_semaphores = 1;
module_param_named(semaphores, i915_semaphores, int, 0600);
unsigned int i915_enable_rc6 = 0;
@ -58,7 +61,10 @@ module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
unsigned int i915_panel_use_ssc = 1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
bool i915_try_reset = true;
int i915_vbt_sdvo_panel_type = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
static bool i915_try_reset = true;
module_param_named(reset, i915_try_reset, bool, 0600);
static struct drm_driver driver;
@ -716,6 +722,9 @@ static struct drm_driver driver = {
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = i915_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,
@ -732,14 +741,6 @@ static struct drm_driver driver = {
.llseek = noop_llseek,
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = i915_pci_probe,
.remove = i915_pci_remove,
.driver.pm = &i915_pm_ops,
},
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@ -748,6 +749,14 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver i915_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = i915_pci_probe,
.remove = i915_pci_remove,
.driver.pm = &i915_pm_ops,
};
static int __init i915_init(void)
{
if (!intel_agp_enabled) {
@ -781,12 +790,12 @@ static int __init i915_init(void)
if (!(driver.driver_features & DRIVER_MODESET))
driver.get_vblank_timestamp = NULL;
return drm_init(&driver);
return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
drm_exit(&driver);
drm_pci_exit(&driver, &i915_pci_driver);
}
module_init(i915_init);


@ -49,17 +49,22 @@
enum pipe {
PIPE_A = 0,
PIPE_B,
PIPE_C,
I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')
enum plane {
PLANE_A = 0,
PLANE_B,
PLANE_C,
};
#define I915_NUM_PIPE 2
#define plane_name(p) ((p) + 'A')
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
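For reference, a short illustration of how these new helpers behave (PIPE_C is added above; num_pipe is set per device in i915_driver_load):
/*
 * e.g. pipe_name(PIPE_C) evaluates to 'C' and plane_name(PLANE_A) to 'A',
 * which is why the seq_printf callers above switch from "%s" to "%c".
 * for_each_pipe(p) simply iterates p = 0 .. dev_priv->num_pipe - 1.
 */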
/* Interface history:
*
* 1.1: Original.
@ -75,10 +80,7 @@ enum plane {
#define DRIVER_PATCHLEVEL 0
#define WATCH_COHERENCY 0
#define WATCH_EXEC 0
#define WATCH_RELOC 0
#define WATCH_LISTS 0
#define WATCH_PWRITE 0
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
@ -111,6 +113,7 @@ struct intel_opregion {
struct opregion_swsci *swsci;
struct opregion_asle *asle;
void *vbt;
u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)
@ -144,8 +147,7 @@ struct intel_display_error_state;
struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
u32 pipeastat;
u32 pipebstat;
u32 pipestat[I915_MAX_PIPES];
u32 ipeir;
u32 ipehr;
u32 instdone;
@ -172,7 +174,7 @@ struct drm_i915_error_state {
int page_count;
u32 gtt_offset;
u32 *pages[0];
} *ringbuffer, *batchbuffer[I915_NUM_RINGS];
} *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
u32 name;
@ -200,9 +202,7 @@ struct drm_i915_display_funcs {
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
void (*update_wm)(struct drm_device *dev, int planea_clock,
int planeb_clock, int sr_hdisplay, int sr_htotal,
int pixel_size);
void (*update_wm)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@ -274,7 +274,6 @@ typedef struct drm_i915_private {
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
dma_addr_t dma_status_page;
uint32_t counter;
drm_local_map_t hws_map;
struct drm_i915_gem_object *pwrctx;
@ -289,7 +288,6 @@ typedef struct drm_i915_private {
int page_flipping;
atomic_t irq_received;
u32 trace_irq_seqno;
/* protects the irq masks */
spinlock_t irq_lock;
@ -324,8 +322,6 @@ typedef struct drm_i915_private {
int cfb_plane;
int cfb_y;
int irq_enabled;
struct intel_opregion opregion;
/* overlay */
@ -387,7 +383,6 @@ typedef struct drm_i915_private {
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@ -614,6 +609,12 @@ typedef struct drm_i915_private {
*/
struct delayed_work retire_work;
/**
* Are we in a non-interruptible section of code like
* modesetting?
*/
bool interruptible;
/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
@ -652,6 +653,7 @@ typedef struct drm_i915_private {
unsigned int lvds_border_bits;
/* Panel fitter placement and size for Ironlake+ */
u32 pch_pf_pos, pch_pf_size;
int panel_t3, panel_t12;
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
@ -698,6 +700,8 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
struct drm_property *broadcast_rgb_property;
} drm_i915_private_t;
struct drm_i915_gem_object {
@ -955,10 +959,12 @@ enum intel_chip_family {
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern int i915_panel_ignore_lid;
extern unsigned int i915_powersave;
extern unsigned int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern unsigned int i915_panel_use_ssc;
extern int i915_vbt_sdvo_panel_type;
extern unsigned int i915_enable_rc6;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
@ -998,8 +1004,6 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
extern void i915_enable_interrupt (struct drm_device *dev);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
@ -1051,7 +1055,6 @@ extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev,
struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
int i915_gem_check_is_wedged(struct drm_device *dev);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@ -1094,8 +1097,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
int __must_check i915_gem_flush_ring(struct drm_device *dev,
struct intel_ring_buffer *ring,
int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
@ -1110,12 +1112,18 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool interruptible);
int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle);
/**
* Returns true if seq1 is later than seq2.
*/
@ -1126,16 +1134,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
}
static inline u32
i915_gem_next_request_seqno(struct drm_device *dev,
struct intel_ring_buffer *ring)
i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_private_t *dev_priv = ring->dev->dev_private;
return ring->outstanding_lazy_request = dev_priv->next_seqno;
}
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined,
bool interruptible);
struct intel_ring_buffer *pipelined);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
void i915_gem_retire_requests(struct drm_device *dev);
@ -1144,8 +1150,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
bool interruptible);
int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
@ -1154,14 +1159,11 @@ void i915_gem_do_init(struct drm_device *dev,
unsigned long end);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_i915_gem_request *request,
struct intel_ring_buffer *ring);
int __must_check i915_do_wait_request(struct drm_device *dev,
uint32_t seqno,
bool interruptible,
struct intel_ring_buffer *ring);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@ -1313,7 +1315,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
#define __i915_read(x, y) \
static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = read##y(dev_priv->regs + reg); \
trace_i915_reg_rw('R', reg, val, sizeof(val)); \
trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
__i915_read(8, b)
@ -1324,7 +1326,7 @@ __i915_read(64, q)
#define __i915_write(x, y) \
static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
trace_i915_reg_rw('W', reg, val, sizeof(val)); \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
write##y(val, dev_priv->regs + reg); \
}
__i915_write(8, b)
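As a sketch (not part of the patch), the 32-bit instantiation of the read macro above expands roughly to:
static inline u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = readl(dev_priv->regs + reg);	/* read##y with y == l */
	trace_i915_reg_rw(false, reg, val, sizeof(val));	/* trace flag is now a bool: false == read */
	return val;
}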
@ -1382,47 +1384,4 @@ static inline void i915_gt_write(struct drm_i915_private *dev_priv,
__gen6_gt_wait_for_fifo(dev_priv);
I915_WRITE(reg, val);
}
static inline void
i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
{
/* Trace down the write operation before the real write */
trace_i915_reg_rw('W', reg, val, len);
switch (len) {
case 8:
writeq(val, dev_priv->regs + reg);
break;
case 4:
writel(val, dev_priv->regs + reg);
break;
case 2:
writew(val, dev_priv->regs + reg);
break;
case 1:
writeb(val, dev_priv->regs + reg);
break;
}
}
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
* MI_STORE_DATA_IMM.
*
* The following dwords have a reserved meaning:
* 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
* 0x04: ring 0 head pointer
* 0x05: ring 1 head pointer (915-class)
* 0x06: ring 2 head pointer (915-class)
* 0x10-0x1b: Context status DWords (GM45)
* 0x1f: Last written status offset. (GM45)
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
(LP_RING(dev_priv)->status_page.page_addr))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
#endif


@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
dev_priv->mm.object_memory -= size;
}
int
i915_gem_check_is_wedged(struct drm_device *dev)
static int
i915_gem_wait_for_error(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct completion *x = &dev_priv->error_completion;
@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev)
if (ret)
return ret;
/* Success, we reset the GPU! */
if (!atomic_read(&dev_priv->mm.wedged))
return 0;
/* GPU is hung, bump the completion count to account for
* the token we just consumed so that we never hit zero and
* end up waiting upon a subsequent completion event that
* will never happen.
*/
spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
spin_unlock_irqrestore(&x->wait.lock, flags);
return -EIO;
if (atomic_read(&dev_priv->mm.wedged)) {
/* GPU is hung, bump the completion count to account for
* the token we just consumed so that we never hit zero and
* end up waiting upon a subsequent completion event that
* will never happen.
*/
spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
spin_unlock_irqrestore(&x->wait.lock, flags);
}
return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = i915_gem_check_is_wedged(dev);
ret = i915_gem_wait_for_error(dev);
if (ret)
return ret;
@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
if (ret)
return ret;
if (atomic_read(&dev_priv->mm.wedged)) {
mutex_unlock(&dev->struct_mutex);
return -EAGAIN;
}
WARN_ON(i915_verify_lists(dev));
return 0;
}
@ -193,22 +185,20 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
/**
* Creates a new mm object and returns a handle to it.
*/
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
uint32_t *handle_p)
{
struct drm_i915_gem_create *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
args->size = roundup(args->size, PAGE_SIZE);
size = roundup(size, PAGE_SIZE);
/* Allocate the new object */
obj = i915_gem_alloc_object(dev, args->size);
obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return -ENOMEM;
@ -224,10 +214,41 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj);
args->handle = handle;
*handle_p = handle;
return 0;
}
int
i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
/* have to work out size/pitch and return them */
args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
args->size, &args->handle);
}
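A quick sanity check of the pitch/size math above (values are hypothetical, not from the patch): for a 1920x1080 dumb buffer at 32 bpp,
/*
 * bytes per pixel = (32 + 7) / 8 = 4
 * pitch = ALIGN(1920 * 4, 64) = 7680 bytes
 * size  = 7680 * 1080 = 8294400 bytes (~7.9 MiB)
 */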
int i915_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle)
{
return drm_gem_handle_delete(file, handle);
}
/**
* Creates a new mm object and returns a handle to it.
*/
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_create *args = data;
return i915_gem_create(file, dev,
args->size, &args->handle);
}
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
@ -514,7 +535,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@ -526,6 +547,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
trace_i915_gem_object_pread(obj, args->offset, args->size);
ret = i915_gem_object_set_cpu_read_domain_range(obj,
args->offset,
args->size);
@ -955,7 +978,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@ -967,6 +990,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
@ -1049,7 +1074,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@ -1092,7 +1117,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@ -1121,7 +1146,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
loff_t offset;
unsigned long addr;
if (!(dev->driver->driver_features & DRIVER_GEM))
@ -1136,8 +1160,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -E2BIG;
}
offset = args->offset;
down_write(&current->mm->mmap_sem);
addr = do_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
@ -1182,9 +1204,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
PAGE_SHIFT;
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto out;
trace_i915_gem_object_fault(obj, page_offset, true, write);
/* Now bind it into the GTT if needed */
if (!obj->map_and_fenceable) {
ret = i915_gem_object_unbind(obj);
if (ret)
@ -1203,7 +1229,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (obj->tiling_mode == I915_TILING_NONE)
ret = i915_gem_object_put_fence(obj);
else
ret = i915_gem_object_get_fence(obj, NULL, true);
ret = i915_gem_object_get_fence(obj, NULL);
if (ret)
goto unlock;
@ -1219,12 +1245,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
switch (ret) {
case -EIO:
case -EAGAIN:
/* Give the error handler a chance to run and move the
* objects off the GPU active list. Next time we service the
* fault, we should be able to transition the page into the
* GTT without touching the GPU (and so avoid further
* EIO/EAGAIN). If the GPU is wedged, then there is no issue
* with coherency, just lost writes.
*/
set_need_resched();
case 0:
case -ERESTARTSYS:
case -EINTR:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
@ -1425,27 +1460,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
return tile_height * obj->stride * 2;
}
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
* @file: GEM object info
*
* Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things
* up so we can get faults in the handler above.
*
* The fault handler will take care of binding the object into the GTT
* (since it may have been evicted to make room for something), allocating
* a fence register, and mapping the appropriate aperture address into
* userspace.
*/
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
uint32_t handle,
uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap_gtt *args = data;
struct drm_i915_gem_object *obj;
int ret;
@ -1456,8 +1477,8 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@ -1479,7 +1500,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
goto out;
}
args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
out:
drm_gem_object_unreference(&obj->base);
@ -1488,6 +1509,34 @@ unlock:
return ret;
}
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
* @file: GEM object info
*
* Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things
* up so we can get faults in the handler above.
*
* The fault handler will take care of binding the object into the GTT
* (since it may have been evicted to make room for something), allocating
* a fence register, and mapping the appropriate aperture address into
* userspace.
*/
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_mmap_gtt *args = data;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
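A rough userspace-side sketch of how the fake offset is consumed (assumes libdrm's drmIoctl() plus <sys/mman.h> and i915_drm.h; fd, handle, size and ptr are hypothetical):
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
		/* mmap the DRM fd at the returned fake offset; subsequent
		 * faults are serviced by i915_gem_fault() above. */
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, arg.offset);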
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
@ -1669,9 +1718,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
}
static void
i915_gem_process_flushing_list(struct drm_device *dev,
uint32_t flush_domains,
struct intel_ring_buffer *ring)
i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
uint32_t flush_domains)
{
struct drm_i915_gem_object *obj, *next;
@ -1684,7 +1732,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
obj->base.write_domain = 0;
list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_active(obj, ring,
i915_gem_next_request_seqno(dev, ring));
i915_gem_next_request_seqno(ring));
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
@ -1694,27 +1742,22 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
int
i915_add_request(struct drm_device *dev,
i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request,
struct intel_ring_buffer *ring)
struct drm_i915_gem_request *request)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = NULL;
drm_i915_private_t *dev_priv = ring->dev->dev_private;
uint32_t seqno;
int was_empty;
int ret;
BUG_ON(request == NULL);
if (file != NULL)
file_priv = file->driver_priv;
ret = ring->add_request(ring, &seqno);
if (ret)
return ret;
ring->outstanding_lazy_request = false;
trace_i915_gem_request_add(ring, seqno);
request->seqno = seqno;
request->ring = ring;
@ -1722,7 +1765,9 @@ i915_add_request(struct drm_device *dev,
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
if (file_priv) {
if (file) {
struct drm_i915_file_private *file_priv = file->driver_priv;
spin_lock(&file_priv->mm.lock);
request->file_priv = file_priv;
list_add_tail(&request->client_list,
@ -1730,6 +1775,8 @@ i915_add_request(struct drm_device *dev,
spin_unlock(&file_priv->mm.lock);
}
ring->outstanding_lazy_request = false;
if (!dev_priv->mm.suspended) {
mod_timer(&dev_priv->hangcheck_timer,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@ -1846,18 +1893,15 @@ void i915_gem_reset(struct drm_device *dev)
* This function clears the request list as sequence numbers are passed.
*/
static void
i915_gem_retire_requests_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
int i;
if (!ring->status_page.page_addr ||
list_empty(&ring->request_list))
if (list_empty(&ring->request_list))
return;
WARN_ON(i915_verify_lists(dev));
WARN_ON(i915_verify_lists(ring->dev));
seqno = ring->get_seqno(ring);
@ -1875,7 +1919,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
if (!i915_seqno_passed(seqno, request->seqno))
break;
trace_i915_gem_request_retire(dev, request->seqno);
trace_i915_gem_request_retire(ring, request->seqno);
list_del(&request->list);
i915_gem_request_remove_from_client(request);
@ -1901,13 +1945,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
i915_gem_object_move_to_inactive(obj);
}
if (unlikely (dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
if (unlikely(ring->trace_irq_seqno &&
i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
ring->irq_put(ring);
dev_priv->trace_irq_seqno = 0;
ring->trace_irq_seqno = 0;
}
WARN_ON(i915_verify_lists(dev));
WARN_ON(i915_verify_lists(ring->dev));
}
void
@ -1931,7 +1975,7 @@ i915_gem_retire_requests(struct drm_device *dev)
}
for (i = 0; i < I915_NUM_RINGS; i++)
i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
i915_gem_retire_requests_ring(&dev_priv->ring[i]);
}
static void
@ -1965,11 +2009,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
struct drm_i915_gem_request *request;
int ret;
ret = i915_gem_flush_ring(dev, ring, 0,
I915_GEM_GPU_DOMAINS);
ret = i915_gem_flush_ring(ring,
0, I915_GEM_GPU_DOMAINS);
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (ret || request == NULL ||
i915_add_request(dev, NULL, request, ring))
i915_add_request(ring, NULL, request))
kfree(request);
}
@ -1982,18 +2026,32 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
/**
* Waits for a sequence number to be signaled, and cleans up the
* request and object lists appropriately for that event.
*/
int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
bool interruptible, struct intel_ring_buffer *ring)
i915_wait_request(struct intel_ring_buffer *ring,
uint32_t seqno)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 ier;
int ret = 0;
BUG_ON(seqno == 0);
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
if (atomic_read(&dev_priv->mm.wedged)) {
struct completion *x = &dev_priv->error_completion;
bool recovery_complete;
unsigned long flags;
/* Give the error handler a chance to run. */
spin_lock_irqsave(&x->wait.lock, flags);
recovery_complete = x->done > 0;
spin_unlock_irqrestore(&x->wait.lock, flags);
return recovery_complete ? -EIO : -EAGAIN;
}
if (seqno == ring->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
@ -2002,7 +2060,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (request == NULL)
return -ENOMEM;
ret = i915_add_request(dev, NULL, request, ring);
ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
@ -2012,22 +2070,22 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
}
if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(ring->dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
i915_driver_irq_preinstall(dev);
i915_driver_irq_postinstall(dev);
i915_driver_irq_preinstall(ring->dev);
i915_driver_irq_postinstall(ring->dev);
}
trace_i915_gem_request_wait_begin(dev, seqno);
trace_i915_gem_request_wait_begin(ring, seqno);
ring->waiting_seqno = seqno;
if (ring->irq_get(ring)) {
if (interruptible)
if (dev_priv->mm.interruptible)
ret = wait_event_interruptible(ring->irq_queue,
i915_seqno_passed(ring->get_seqno(ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
@ -2043,7 +2101,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
ret = -EBUSY;
ring->waiting_seqno = 0;
trace_i915_gem_request_wait_end(dev, seqno);
trace_i915_gem_request_wait_end(ring, seqno);
}
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
@ -2059,31 +2117,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
* a separate wait queue to handle that.
*/
if (ret == 0)
i915_gem_retire_requests_ring(dev, ring);
i915_gem_retire_requests_ring(ring);
return ret;
}
/**
* Waits for a sequence number to be signaled, and cleans up the
* request and object lists appropriately for that event.
*/
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno,
struct intel_ring_buffer *ring)
{
return i915_do_wait_request(dev, seqno, 1, ring);
}
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool interruptible)
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
int ret;
/* This function only exists to support waiting for existing rendering,
@ -2095,10 +2140,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
* it.
*/
if (obj->active) {
ret = i915_do_wait_request(dev,
obj->last_rendering_seqno,
interruptible,
obj->ring);
ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
}
@ -2148,6 +2190,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret == -ERESTARTSYS)
return ret;
trace_i915_gem_object_unbind(obj);
i915_gem_gtt_unbind_object(obj);
i915_gem_object_put_pages_gtt(obj);
@ -2163,29 +2207,27 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
trace_i915_gem_object_unbind(obj);
return ret;
}
int
i915_gem_flush_ring(struct drm_device *dev,
struct intel_ring_buffer *ring,
i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
int ret;
trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
ret = ring->flush(ring, invalidate_domains, flush_domains);
if (ret)
return ret;
i915_gem_process_flushing_list(dev, flush_domains, ring);
i915_gem_process_flushing_list(ring, flush_domains);
return 0;
}
static int i915_ring_idle(struct drm_device *dev,
struct intel_ring_buffer *ring)
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;
@ -2193,15 +2235,13 @@ static int i915_ring_idle(struct drm_device *dev,
return 0;
if (!list_empty(&ring->gpu_write_list)) {
ret = i915_gem_flush_ring(dev, ring,
ret = i915_gem_flush_ring(ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
return i915_wait_request(dev,
i915_gem_next_request_seqno(dev, ring),
ring);
return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
int
@ -2218,7 +2258,7 @@ i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
ret = i915_ring_idle(dev, &dev_priv->ring[i]);
ret = i915_ring_idle(&dev_priv->ring[i]);
if (ret)
return ret;
}
@ -2402,15 +2442,13 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined,
bool interruptible)
struct intel_ring_buffer *pipelined)
{
int ret;
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->base.dev,
obj->last_fenced_ring,
ret = i915_gem_flush_ring(obj->last_fenced_ring,
0, obj->base.write_domain);
if (ret)
return ret;
@ -2422,10 +2460,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
if (!ring_passed_seqno(obj->last_fenced_ring,
obj->last_fenced_seqno)) {
ret = i915_do_wait_request(obj->base.dev,
obj->last_fenced_seqno,
interruptible,
obj->last_fenced_ring);
ret = i915_wait_request(obj->last_fenced_ring,
obj->last_fenced_seqno);
if (ret)
return ret;
}
@ -2451,7 +2487,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
if (obj->tiling_mode)
i915_gem_release_mmap(obj);
ret = i915_gem_object_flush_fence(obj, NULL, true);
ret = i915_gem_object_flush_fence(obj, NULL);
if (ret)
return ret;
@ -2528,8 +2564,7 @@ i915_find_fence_reg(struct drm_device *dev,
*/
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined,
bool interruptible)
struct intel_ring_buffer *pipelined)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -2551,10 +2586,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (reg->setup_seqno) {
if (!ring_passed_seqno(obj->last_fenced_ring,
reg->setup_seqno)) {
ret = i915_do_wait_request(obj->base.dev,
reg->setup_seqno,
interruptible,
obj->last_fenced_ring);
ret = i915_wait_request(obj->last_fenced_ring,
reg->setup_seqno);
if (ret)
return ret;
}
@ -2563,15 +2596,13 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
}
} else if (obj->last_fenced_ring &&
obj->last_fenced_ring != pipelined) {
ret = i915_gem_object_flush_fence(obj,
pipelined,
interruptible);
ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
return ret;
} else if (obj->tiling_changed) {
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
if (ret)
return ret;
@ -2588,7 +2619,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (obj->tiling_changed) {
if (pipelined) {
reg->setup_seqno =
i915_gem_next_request_seqno(dev, pipelined);
i915_gem_next_request_seqno(pipelined);
obj->last_fenced_seqno = reg->setup_seqno;
obj->last_fenced_ring = pipelined;
}
@ -2602,7 +2633,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (reg == NULL)
return -ENOSPC;
ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
return ret;
@ -2614,9 +2645,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (old->tiling_mode)
i915_gem_release_mmap(old);
ret = i915_gem_object_flush_fence(old,
pipelined,
interruptible);
ret = i915_gem_object_flush_fence(old, pipelined);
if (ret) {
drm_gem_object_unreference(&old->base);
return ret;
@ -2628,7 +2657,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
old->fence_reg = I915_FENCE_REG_NONE;
old->last_fenced_ring = pipelined;
old->last_fenced_seqno =
pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
drm_gem_object_unreference(&old->base);
} else if (obj->last_fenced_seqno == 0)
@ -2640,7 +2669,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
obj->last_fenced_ring = pipelined;
reg->setup_seqno =
pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
obj->last_fenced_seqno = reg->setup_seqno;
update:
@ -2837,7 +2866,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->map_and_fenceable = mappable && fenceable;
trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
trace_i915_gem_object_bind(obj, map_and_fenceable);
return 0;
}
@ -2860,13 +2889,11 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
return 0;
/* Queue the GPU write cache flushing we need. */
return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
@ -2933,12 +2960,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->gtt_space == NULL)
return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
return 0;
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
if (obj->pending_gpu_write || write) {
ret = i915_gem_object_wait_rendering(obj, true);
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
@ -2988,7 +3018,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
/* Currently, we are always called from an non-interruptible context. */
if (pipelined != obj->ring) {
ret = i915_gem_object_wait_rendering(obj, false);
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
@ -3006,8 +3036,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
}
int
i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
bool interruptible)
i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
{
int ret;
@ -3015,13 +3044,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
0, obj->base.write_domain);
ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
if (ret)
return ret;
}
return i915_gem_object_wait_rendering(obj, interruptible);
return i915_gem_object_wait_rendering(obj);
}
/**
@ -3036,11 +3064,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return 0;
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
ret = i915_gem_object_wait_rendering(obj, true);
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
@ -3138,7 +3169,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
if (ret)
return ret;
ret = i915_gem_object_wait_rendering(obj, true);
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
@ -3209,6 +3240,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
u32 seqno = 0;
int ret;
if (atomic_read(&dev_priv->mm.wedged))
return -EIO;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
@ -3324,7 +3358,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@ -3375,7 +3409,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@ -3412,7 +3446,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@ -3430,7 +3464,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* flush earlier is beneficial.
*/
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(dev, obj->ring,
ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
} else if (obj->ring->outstanding_lazy_request ==
obj->last_rendering_seqno) {
@ -3441,9 +3475,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
*/
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request)
ret = i915_add_request(dev,
NULL, request,
obj->ring);
ret = i915_add_request(obj->ring, NULL, request);
else
ret = -ENOMEM;
}
@ -3453,7 +3485,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* are actually unmasked, and our working set ends up being
* larger than required.
*/
i915_gem_retire_requests_ring(dev, obj->ring);
i915_gem_retire_requests_ring(obj->ring);
args->busy = obj->active;
}
@ -3492,7 +3524,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
if (obj == NULL) {
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@ -3583,6 +3615,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
kfree(obj->page_cpu_valid);
kfree(obj->bit_17);
kfree(obj);
trace_i915_gem_object_destroy(obj);
}
void i915_gem_free_object(struct drm_gem_object *gem_obj)
@ -3590,8 +3624,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
trace_i915_gem_object_destroy(obj);
while (obj->pin_count > 0)
i915_gem_object_unpin(obj);
@ -3837,6 +3869,8 @@ i915_gem_load(struct drm_device *dev)
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
dev_priv->mm.interruptible = true;
dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
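The hunks above replace the per-call "interruptible" argument that used to be threaded through i915_wait_request(), i915_gem_object_wait_rendering() and friends with a single device-level flag, initialised to true in i915_gem_load(). A minimal sketch of how a non-interruptible section might now be bracketed; the wrapper function below is hypothetical, only dev_priv->mm.interruptible and i915_gem_object_flush_gpu() come from this diff:

/* Hypothetical helper: make waits non-interruptible for one operation by
 * flipping the device-level flag instead of passing a bool down the stack. */
static int example_flush_noninterruptible(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_flush_gpu(obj);	/* note: no interruptible argument any more */
	dev_priv->mm.interruptible = was_interruptible;

	return ret;
}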


@ -134,51 +134,6 @@ i915_verify_lists(struct drm_device *dev)
}
#endif /* WATCH_INACTIVE */
#if WATCH_EXEC | WATCH_PWRITE
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
uint32_t bias, uint32_t mark)
{
uint32_t *mem = kmap_atomic(page, KM_USER0);
int i;
for (i = start; i < end; i += 4)
DRM_INFO("%08x: %08x%s\n",
(int) (bias + i), mem[i / 4],
(bias + i == mark) ? " ********" : "");
kunmap_atomic(mem, KM_USER0);
/* give syslog time to catch up */
msleep(1);
}
void
i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark)
{
int page;
DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
page_len = len - page * PAGE_SIZE;
if (page_len > PAGE_SIZE)
page_len = PAGE_SIZE;
for (chunk = 0; chunk < page_len; chunk += 128) {
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
i915_gem_dump_page(obj->pages[page],
chunk, chunk + chunk_len,
obj->gtt_offset +
page * PAGE_SIZE,
mark);
}
}
}
#endif
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)


@ -30,6 +30,7 @@
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
@ -63,6 +64,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
return 0;
}
trace_i915_gem_evict(dev, min_size, alignment, mappable);
/*
* The goal is to evict objects and amalgamate space in LRU order.
* The oldest idle objects reside on the inactive list, which is in
@ -189,6 +192,8 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
if (lists_empty)
return -ENOSPC;
trace_i915_gem_evict_everything(dev, purgeable_only);
/* Flush everything (on to the inactive lists) and evict */
ret = i915_gpu_idle(dev);
if (ret)


@ -37,6 +37,7 @@ struct change_domains {
uint32_t invalidate_domains;
uint32_t flush_domains;
uint32_t flush_rings;
uint32_t flips;
};
/*
@ -190,6 +191,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
i915_gem_release_mmap(obj);
if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
@ -282,21 +286,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_offset = to_intel_bo(target_obj)->gtt_offset;
#if WATCH_RELOC
DRM_INFO("%s: obj %p offset %08x target %d "
"read %08x write %08x gtt %08x "
"presumed %08x delta %08x\n",
__func__,
obj,
(int) reloc->offset,
(int) reloc->target_handle,
(int) reloc->read_domains,
(int) reloc->write_domain,
(int) target_offset,
(int) reloc->presumed_offset,
reloc->delta);
#endif
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
@ -365,16 +354,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
/* and points to somewhere within the target object. */
if (unlikely(reloc->delta >= target_obj->size)) {
DRM_ERROR("Relocation beyond target object bounds: "
"obj %p target %d delta %d size %d.\n",
obj, reloc->target_handle,
(int) reloc->delta,
(int) target_obj->size);
return ret;
}
reloc->delta += target_offset;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
@ -575,7 +554,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (has_fenced_gpu_access) {
if (need_fence) {
ret = i915_gem_object_get_fence(obj, ring, 1);
ret = i915_gem_object_get_fence(obj, ring);
if (ret)
break;
} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@ -690,11 +669,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
for (i = 0; i < count; i++) {
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (obj == NULL) {
if (&obj->base == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
@ -749,8 +726,7 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
for (i = 0; i < I915_NUM_RINGS; i++)
if (flush_rings & (1 << i)) {
ret = i915_gem_flush_ring(dev,
&dev_priv->ring[i],
ret = i915_gem_flush_ring(&dev_priv->ring[i],
invalidate_domains,
flush_domains);
if (ret)
@ -774,7 +750,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
return i915_gem_object_wait_rendering(obj, true);
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
@ -789,7 +765,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
if (request == NULL)
return -ENOMEM;
ret = i915_add_request(obj->base.dev, NULL, request, from);
ret = i915_add_request(from, NULL, request);
if (ret) {
kfree(request);
return ret;
@ -802,6 +778,39 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
return intel_ring_sync(to, from, seqno - 1);
}
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
u32 plane, flip_mask;
int ret;
/* Check for any pending flips. As we only maintain a flip queue depth
* of 1, we can simply insert a WAIT for the next display flip prior
* to executing the batch and avoid stalling the CPU.
*/
for (plane = 0; flips >> plane; plane++) {
if (((flips >> plane) & 1) == 0)
continue;
if (plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
}
return 0;
}
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
@ -810,19 +819,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct change_domains cd;
int ret;
cd.invalidate_domains = 0;
cd.flush_domains = 0;
cd.flush_rings = 0;
memset(&cd, 0, sizeof(cd));
list_for_each_entry(obj, objects, exec_list)
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
if (cd.invalidate_domains | cd.flush_domains) {
#if WATCH_EXEC
DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
__func__,
cd.invalidate_domains,
cd.flush_domains);
#endif
ret = i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains,
@ -831,6 +832,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
return ret;
}
if (cd.flips) {
ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
if (ret)
return ret;
}
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_sync_rings(obj, ring);
if (ret)
@ -877,47 +884,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
return 0;
}
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
int flips;
/* Check for any pending flips. As we only maintain a flip queue depth
* of 1, we can simply insert a WAIT for the next display flip prior
* to executing the batch and avoid stalling the CPU.
*/
flips = 0;
list_for_each_entry(obj, objects, exec_list) {
if (obj->base.write_domain)
flips |= atomic_read(&obj->pending_flip);
}
if (flips) {
int plane, flip_mask, ret;
for (plane = 0; flips >> plane; plane++) {
if (((flips >> plane) & 1) == 0)
continue;
if (plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
}
}
return 0;
}
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct intel_ring_buffer *ring,
@ -926,6 +892,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
@ -939,9 +909,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
intel_mark_busy(ring->dev, obj);
}
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
obj->base.write_domain);
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
@ -963,14 +931,14 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 4)
invalidate |= I915_GEM_DOMAIN_SAMPLER;
if (ring->flush(ring, invalidate, 0)) {
i915_gem_next_request_seqno(dev, ring);
i915_gem_next_request_seqno(ring);
return;
}
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL || i915_add_request(dev, file, request, ring)) {
i915_gem_next_request_seqno(dev, ring);
if (request == NULL || i915_add_request(ring, file, request)) {
i915_gem_next_request_seqno(ring);
kfree(request);
}
}
@ -1000,10 +968,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
@ -1113,7 +1077,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (obj == NULL) {
if (&obj->base == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
@ -1170,11 +1134,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;
ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
if (ret)
goto err;
seqno = i915_gem_next_request_seqno(dev, ring);
seqno = i915_gem_next_request_seqno(ring);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
if (seqno < ring->sync_seqno[i]) {
/* The GPU can not handle its semaphore value wrapping,
@ -1189,6 +1149,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
@ -1245,11 +1207,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
@ -1330,17 +1287,16 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
if (args->buffer_count < 1) {
DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) {
DRM_ERROR("Failed to allocate exec list for %d buffers\n",
args->buffer_count);


@ -284,14 +284,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret;
ret = i915_gem_check_is_wedged(dev);
if (ret)
return ret;
int ret = 0;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
if (&obj->base == NULL)
return -ENOENT;
if (!i915_tiling_ok(dev,
@ -384,7 +380,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
if (&obj->base == NULL)
return -ENOENT;
mutex_lock(&dev->struct_mutex);


@ -85,21 +85,11 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
static inline u32
i915_pipestat(int pipe)
{
if (pipe == 0)
return PIPEASTAT;
if (pipe == 1)
return PIPEBSTAT;
BUG();
}
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != mask) {
u32 reg = i915_pipestat(pipe);
u32 reg = PIPESTAT(pipe);
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
@ -112,7 +102,7 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != 0) {
u32 reg = i915_pipestat(pipe);
u32 reg = PIPESTAT(pipe);
dev_priv->pipestat[pipe] &= ~mask;
I915_WRITE(reg, dev_priv->pipestat[pipe]);
@ -171,12 +161,12 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
"pipe %d\n", pipe);
"pipe %c\n", pipe_name(pipe));
return 0;
}
high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
high_frame = PIPEFRAME(pipe);
low_frame = PIPEFRAMEPIXEL(pipe);
/*
* High & low register fields aren't synchronized, so make sure
@ -197,11 +187,11 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
int reg = PIPE_FRMCOUNT_GM45(pipe);
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
"pipe %d\n", pipe);
"pipe %c\n", pipe_name(pipe));
return 0;
}
@ -219,7 +209,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
"pipe %d\n", pipe);
"pipe %c\n", pipe_name(pipe));
return 0;
}
@ -367,7 +357,7 @@ static void notify_ring(struct drm_device *dev,
return;
seqno = ring->get_seqno(ring);
trace_i915_gem_request_complete(dev, seqno);
trace_i915_gem_request_complete(ring, seqno);
ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
@ -419,6 +409,7 @@ static void pch_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 pch_iir;
int pipe;
pch_iir = I915_READ(SDEIIR);
@ -439,13 +430,11 @@ static void pch_irq_handler(struct drm_device *dev)
if (pch_iir & SDE_POISON)
DRM_ERROR("PCH poison interrupt\n");
if (pch_iir & SDE_FDI_MASK) {
u32 fdia, fdib;
fdia = I915_READ(FDI_RXA_IIR);
fdib = I915_READ(FDI_RXB_IIR);
DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
}
if (pch_iir & SDE_FDI_MASK)
for_each_pipe(pipe)
DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe)));
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
@ -650,9 +639,14 @@ static void
i915_error_state_free(struct drm_device *dev,
struct drm_i915_error_state *error)
{
i915_error_object_free(error->batchbuffer[0]);
i915_error_object_free(error->batchbuffer[1]);
i915_error_object_free(error->ringbuffer);
int i;
for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
i915_error_object_free(error->batchbuffer[i]);
for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
i915_error_object_free(error->ringbuffer[i]);
kfree(error->active_bo);
kfree(error->overlay);
kfree(error);
@ -767,7 +761,7 @@ static void i915_capture_error_state(struct drm_device *dev)
struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error;
unsigned long flags;
int i;
int i, pipe;
spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
@ -775,19 +769,21 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error)
return;
/* Account for pipe specific data like PIPE*STAT */
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
}
DRM_DEBUG_DRIVER("generating error event\n");
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
dev->primary->index);
error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
error->pipebstat = I915_READ(PIPEBSTAT);
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
error->instpm = I915_READ(INSTPM);
error->error = 0;
if (INTEL_INFO(dev)->gen >= 6) {
@ -826,15 +822,16 @@ static void i915_capture_error_state(struct drm_device *dev)
}
i915_gem_record_fences(dev, error);
/* Record the active batchbuffers */
for (i = 0; i < I915_NUM_RINGS; i++)
/* Record the active batch and ring buffers */
for (i = 0; i < I915_NUM_RINGS; i++) {
error->batchbuffer[i] =
i915_error_first_batchbuffer(dev_priv,
&dev_priv->ring[i]);
/* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev_priv,
dev_priv->ring[RCS].obj);
error->ringbuffer[i] =
i915_error_object_create(dev_priv,
dev_priv->ring[i].obj);
}
/* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
@ -907,6 +904,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 eir = I915_READ(EIR);
int pipe;
if (!eir)
return;
@ -955,14 +953,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
u32 pipea_stats = I915_READ(PIPEASTAT);
u32 pipeb_stats = I915_READ(PIPEBSTAT);
printk(KERN_ERR "memory refresh error\n");
printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
pipea_stats);
printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
pipeb_stats);
printk(KERN_ERR "memory refresh error:\n");
for_each_pipe(pipe)
printk(KERN_ERR "pipe %c stat: 0x%08x\n",
pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
}
if (eir & I915_ERROR_INSTRUCTION) {
@ -1076,10 +1070,10 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
} else {
int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8);
@ -1099,12 +1093,13 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv;
u32 iir, new_iir;
u32 pipea_stats, pipeb_stats;
u32 pipe_stats[I915_MAX_PIPES];
u32 vblank_status;
int vblank = 0;
unsigned long irqflags;
int irq_received;
int ret = IRQ_NONE;
int ret = IRQ_NONE, pipe;
bool blc_event = false;
atomic_inc(&dev_priv->irq_received);
@ -1127,27 +1122,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
pipea_stats = I915_READ(PIPEASTAT);
pipeb_stats = I915_READ(PIPEBSTAT);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
/*
* Clear the PIPE(A|B)STAT regs before the IIR
*/
if (pipea_stats & 0x8000ffff) {
if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
DRM_DEBUG_DRIVER("pipe a underrun\n");
I915_WRITE(PIPEASTAT, pipea_stats);
irq_received = 1;
}
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
if (pipeb_stats & 0x8000ffff) {
if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
DRM_DEBUG_DRIVER("pipe b underrun\n");
I915_WRITE(PIPEBSTAT, pipeb_stats);
irq_received = 1;
/*
* Clear the PIPE*STAT regs before the IIR
*/
if (pipe_stats[pipe] & 0x8000ffff) {
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
irq_received = 1;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
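The interrupt-handler conversions above and below replace the open-coded pipe A/pipe B pairs with a for_each_pipe() loop over PIPESTAT(pipe) (the PIPESTAT macro itself is added to i915_reg.h later in this commit). A plausible form of the iterator, assuming two pipes; the real definition in i915_drv.h may bound the loop differently:

/* Sketch only: iterate over the display pipes. */
#define for_each_pipe(p) for ((p) = 0; (p) < I915_MAX_PIPES; (p)++)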
@ -1198,27 +1189,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip_plane(dev, 1);
}
if (pipea_stats & vblank_status &&
drm_handle_vblank(dev, 0)) {
vblank++;
if (!dev_priv->flip_pending_is_done) {
i915_pageflip_stall_check(dev, 0);
intel_finish_page_flip(dev, 0);
for_each_pipe(pipe) {
if (pipe_stats[pipe] & vblank_status &&
drm_handle_vblank(dev, pipe)) {
vblank++;
if (!dev_priv->flip_pending_is_done) {
i915_pageflip_stall_check(dev, pipe);
intel_finish_page_flip(dev, pipe);
}
}
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
}
if (pipeb_stats & vblank_status &&
drm_handle_vblank(dev, 1)) {
vblank++;
if (!dev_priv->flip_pending_is_done) {
i915_pageflip_stall_check(dev, 1);
intel_finish_page_flip(dev, 1);
}
}
if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
(pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev);
/* With MSI, interrupts are only generated when iir
@ -1268,16 +1254,6 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->counter;
}
void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
if (dev_priv->trace_irq_seqno == 0 &&
ring->irq_get(ring))
dev_priv->trace_irq_seqno = seqno;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -1377,7 +1353,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE);
/* maintain vblank delivery even in deep C-states */
if (dev_priv->info->gen == 3)
I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
@ -1390,6 +1371,10 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->info->gen == 3)
I915_WRITE(INSTPM,
INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
if (HAS_PCH_SPLIT(dev))
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
@ -1400,16 +1385,6 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
void i915_enable_interrupt (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_PCH_SPLIT(dev))
intel_opregion_enable_asle(dev);
dev_priv->irq_enabled = 1;
}
/* Set the vblank monitor pipe
*/
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@ -1646,12 +1621,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
POSTING_READ(GTIER);
if (HAS_PCH_CPT(dev)) {
hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
} else {
hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
hotplug_mask |= SDE_AUX_MASK;
hotplug_mask = (SDE_CRT_HOTPLUG |
SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG |
SDE_PORTD_HOTPLUG |
SDE_AUX_MASK);
}
dev_priv->pch_irq_mask = ~hotplug_mask;
@ -1674,6 +1653,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
atomic_set(&dev_priv->irq_received, 0);
@ -1691,8 +1671,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
}
I915_WRITE(HWSTAM, 0xeffe);
I915_WRITE(PIPEASTAT, 0);
I915_WRITE(PIPEBSTAT, 0);
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
POSTING_READ(IER);
@ -1804,6 +1784,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
if (!dev_priv)
return;
@ -1821,12 +1802,13 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
}
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(PIPEASTAT, 0);
I915_WRITE(PIPEBSTAT, 0);
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe),
I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
I915_WRITE(IIR, I915_READ(IIR));
}


@ -405,9 +405,12 @@
#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
#define INSTPM_SELF_EN (1<<12) /* 915GM only */
#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
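The INSTPM writes in the vblank enable/disable hunks above appear to rely on the register's masked-write convention: the upper 16 bits select which of the lower bits are updated, so writing the bit only in the high half clears it, and writing it in both halves sets it. A short sketch of that convention; the helper macro names are illustrative, not from this commit:

/* Illustrative helpers for a masked-write register such as INSTPM. */
#define EXAMPLE_MASKED_BIT_ENABLE(bit)	(((bit) << 16) | (bit))	/* set the bit   */
#define EXAMPLE_MASKED_BIT_DISABLE(bit)	((bit) << 16)		/* clear the bit */

/* i915_enable_vblank() clears AGPBUSY_DIS so vblank irqs survive deep C-states:
 *	I915_WRITE(INSTPM, EXAMPLE_MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
 * i915_disable_vblank() sets it again:
 *	I915_WRITE(INSTPM, EXAMPLE_MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
 */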
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
#define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define FW_BLC_SELF_EN_MASK (1<<31)
#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
@ -706,9 +709,9 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
#define DPLL_A 0x06014
#define DPLL_B 0x06018
#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
#define _DPLL_A 0x06014
#define _DPLL_B 0x06018
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
@ -779,7 +782,7 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
#define DPLL_A_MD 0x0601c /* 965+ only */
#define _DPLL_A_MD 0x0601c /* 965+ only */
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
@ -816,14 +819,14 @@
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define DPLL_B_MD 0x06020 /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
#define FPA0 0x06040
#define FPA1 0x06044
#define FPB0 0x06048
#define FPB1 0x0604c
#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
#define _DPLL_B_MD 0x06020 /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
#define _FPA0 0x06040
#define _FPA1 0x06044
#define _FPB0 0x06048
#define _FPB1 0x0604c
#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
#define FP_N_DIV_MASK 0x003f0000
#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
@ -962,8 +965,9 @@
* Palette regs
*/
#define PALETTE_A 0x0a000
#define PALETTE_B 0x0a800
#define _PALETTE_A 0x0a000
#define _PALETTE_B 0x0a800
#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
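From here on, nearly every pipe A/pipe B register pair is renamed with a leading underscore and wrapped in a _PIPE(pipe, a, b) accessor (PALETTE, DPLL, PIPESTAT and so on). The helper itself is defined elsewhere in i915_reg.h; a sketch consistent with how these macros are used is:

/* Sketch of the pipe-indexing helper assumed by the macros in this file:
 * pipe 0 yields the A offset, pipe 1 the B offset. */
#define _PIPE(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

/* e.g. PALETTE(0) == _PALETTE_A (0x0a000), PALETTE(1) == _PALETTE_B (0x0a800) */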
/* MCH MMIO space */
@ -1267,32 +1271,32 @@
*/
/* Pipe A timing regs */
#define HTOTAL_A 0x60000
#define HBLANK_A 0x60004
#define HSYNC_A 0x60008
#define VTOTAL_A 0x6000c
#define VBLANK_A 0x60010
#define VSYNC_A 0x60014
#define PIPEASRC 0x6001c
#define BCLRPAT_A 0x60020
#define _HTOTAL_A 0x60000
#define _HBLANK_A 0x60004
#define _HSYNC_A 0x60008
#define _VTOTAL_A 0x6000c
#define _VBLANK_A 0x60010
#define _VSYNC_A 0x60014
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
/* Pipe B timing regs */
#define HTOTAL_B 0x61000
#define HBLANK_B 0x61004
#define HSYNC_B 0x61008
#define VTOTAL_B 0x6100c
#define VBLANK_B 0x61010
#define VSYNC_B 0x61014
#define PIPEBSRC 0x6101c
#define BCLRPAT_B 0x61020
#define _HTOTAL_B 0x61000
#define _HBLANK_B 0x61004
#define _HSYNC_B 0x61008
#define _VTOTAL_B 0x6100c
#define _VBLANK_B 0x61010
#define _VSYNC_B 0x61014
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
/* VGA port control */
#define ADPA 0x61100
@ -1386,6 +1390,7 @@
#define SDVO_ENCODING_HDMI (0x2 << 10)
/** Required for HDMI operation */
#define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
#define SDVO_COLOR_RANGE_16_235 (1 << 8)
#define SDVO_BORDER_ENABLE (1 << 7)
#define SDVO_AUDIO_ENABLE (1 << 6)
/** New with 965, default is to be set */
@ -1441,8 +1446,13 @@
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
#define LVDS_PIPE_MASK (1 << 30)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
/* LVDS sync polarity flags. Set to invert (i.e. negative) */
#define LVDS_VSYNC_POLARITY (1 << 21)
#define LVDS_HSYNC_POLARITY (1 << 20)
/* Enable border for unscaled (or aspect-scaled) display */
#define LVDS_BORDER_ENABLE (1 << 15)
/*
@ -1476,6 +1486,9 @@
#define LVDS_B0B3_POWER_DOWN (0 << 2)
#define LVDS_B0B3_POWER_UP (3 << 2)
#define LVDS_PIPE_ENABLED(V, P) \
(((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
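The new LVDS_PIPE_ENABLED() check (and the matching DP_PIPE_ENABLED, ADPA_PIPE_ENABLED and HDMI_PIPE_ENABLED helpers added further down) tests that a port is both enabled and routed to a given pipe in one comparison. A hedged usage sketch, assuming the LVDS port control register is named LVDS as in the rest of this header and that dev_priv is in scope for I915_READ():

/* Sketch: does pipe (0 = A, 1 = B) currently drive the LVDS port? */
static bool example_lvds_on_pipe(struct drm_i915_private *dev_priv, int pipe)
{
	u32 val = I915_READ(LVDS);

	return LVDS_PIPE_ENABLED(val, pipe);
}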
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
@ -2064,6 +2077,10 @@
#define DP_PORT_EN (1 << 31)
#define DP_PIPEB_SELECT (1 << 30)
#define DP_PIPE_MASK (1 << 30)
#define DP_PIPE_ENABLED(V, P) \
(((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN))
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@ -2206,8 +2223,8 @@
* which is after the LUTs, so we want the bytes for our color format.
* For our current usage, this is always 3, one byte for R, G and B.
*/
#define PIPEA_GMCH_DATA_M 0x70050
#define PIPEB_GMCH_DATA_M 0x71050
#define _PIPEA_GMCH_DATA_M 0x70050
#define _PIPEB_GMCH_DATA_M 0x71050
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
@ -2215,8 +2232,8 @@
#define PIPE_GMCH_DATA_M_MASK (0xffffff)
#define PIPEA_GMCH_DATA_N 0x70054
#define PIPEB_GMCH_DATA_N 0x71054
#define _PIPEA_GMCH_DATA_N 0x70054
#define _PIPEB_GMCH_DATA_N 0x71054
#define PIPE_GMCH_DATA_N_MASK (0xffffff)
/*
@ -2230,20 +2247,25 @@
* Attributes and VB-ID.
*/
#define PIPEA_DP_LINK_M 0x70060
#define PIPEB_DP_LINK_M 0x71060
#define _PIPEA_DP_LINK_M 0x70060
#define _PIPEB_DP_LINK_M 0x71060
#define PIPEA_DP_LINK_M_MASK (0xffffff)
#define PIPEA_DP_LINK_N 0x70064
#define PIPEB_DP_LINK_N 0x71064
#define _PIPEA_DP_LINK_N 0x70064
#define _PIPEB_DP_LINK_N 0x71064
#define PIPEA_DP_LINK_N_MASK (0xffffff)
#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
/* Display & cursor control */
/* Pipe A */
#define PIPEADSL 0x70000
#define _PIPEADSL 0x70000
#define DSL_LINEMASK 0x00000fff
#define PIPEACONF 0x70008
#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
@ -2269,7 +2291,7 @@
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define PIPEASTAT 0x70024
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
@ -2305,10 +2327,12 @@
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@ -2470,20 +2494,21 @@
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
#define PIPEAFRAMEHIGH 0x70040
#define _PIPEAFRAMEHIGH 0x70040
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
#define PIPEAFRAMEPIXEL 0x70044
#define _PIPEAFRAMEPIXEL 0x70044
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
#define PIPEA_FRMCOUNT_GM45 0x70040
#define PIPEA_FLIPCOUNT_GM45 0x70044
#define _PIPEA_FRMCOUNT_GM45 0x70040
#define _PIPEA_FLIPCOUNT_GM45 0x70044
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
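The comment above explains why the frame counter must be read high/low/high until the high half is stable; with the per-pipe macros from this hunk the read sequence (compare i915_get_vblank_counter() earlier in this commit) looks roughly like the following sketch:

/* Hedged sketch of the stable frame-counter read; dev_priv is assumed to be
 * in scope for I915_READ(), as in the real i915_get_vblank_counter(). */
static u32 example_read_frame_count(drm_i915_private_t *dev_priv, int pipe)
{
	u32 high1, high2, low;

	do {
		high1 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	return ((high1 >> PIPE_FRAME_HIGH_SHIFT) << 8) |
		(low >> PIPE_FRAME_LOW_SHIFT);
}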
/* Cursor A & B regs */
#define CURACNTR 0x70080
#define _CURACNTR 0x70080
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
@ -2504,23 +2529,23 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define CURABASE 0x70084
#define CURAPOS 0x70088
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
#define CURBCNTR 0x700c0
#define CURBBASE 0x700c4
#define CURBPOS 0x700c8
#define _CURBCNTR 0x700c0
#define _CURBBASE 0x700c4
#define _CURBPOS 0x700c8
#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
/* Display A control */
#define DSPACNTR 0x70180
#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
@ -2534,9 +2559,10 @@
#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_MASK (1<<24)
#define DISPPLANE_SEL_PIPE_SHIFT 24
#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE_A 0
#define DISPPLANE_SEL_PIPE_B (1<<24)
#define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
#define DISPPLANE_SRC_KEY_DISABLE 0
#define DISPPLANE_LINE_DOUBLE (1<<20)
@ -2545,20 +2571,20 @@
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
#define DSPAADDR 0x70184
#define DSPASTRIDE 0x70188
#define DSPAPOS 0x7018C /* reserved */
#define DSPASIZE 0x70190
#define DSPASURF 0x7019C /* 965+ only */
#define DSPATILEOFF 0x701A4 /* 965+ only */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
#define _DSPASIZE 0x70190
#define _DSPASURF 0x7019C /* 965+ only */
#define _DSPATILEOFF 0x701A4 /* 965+ only */
#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
/* VBIOS flags */
#define SWF00 0x71410
@ -2576,27 +2602,27 @@
#define SWF32 0x7241c
/* Pipe B */
#define PIPEBDSL 0x71000
#define PIPEBCONF 0x71008
#define PIPEBSTAT 0x71024
#define PIPEBFRAMEHIGH 0x71040
#define PIPEBFRAMEPIXEL 0x71044
#define PIPEB_FRMCOUNT_GM45 0x71040
#define PIPEB_FLIPCOUNT_GM45 0x71044
#define _PIPEBDSL 0x71000
#define _PIPEBCONF 0x71008
#define _PIPEBSTAT 0x71024
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
#define _PIPEB_FRMCOUNT_GM45 0x71040
#define _PIPEB_FLIPCOUNT_GM45 0x71044
/* Display B control */
#define DSPBCNTR 0x71180
#define _DSPBCNTR 0x71180
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
#define DSPBADDR 0x71184
#define DSPBSTRIDE 0x71188
#define DSPBPOS 0x7118C
#define DSPBSIZE 0x71190
#define DSPBSURF 0x7119C
#define DSPBTILEOFF 0x711A4
#define _DSPBADDR 0x71184
#define _DSPBSTRIDE 0x71188
#define _DSPBPOS 0x7118C
#define _DSPBSIZE 0x71190
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
/* VBIOS regs */
#define VGACNTRL 0x71400
@ -2650,68 +2676,80 @@
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
#define PIPEA_DATA_M1 0x60030
#define _PIPEA_DATA_M1 0x60030
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
#define PIPE_DATA_M1_OFFSET 0
#define PIPEA_DATA_N1 0x60034
#define _PIPEA_DATA_N1 0x60034
#define PIPE_DATA_N1_OFFSET 0
#define PIPEA_DATA_M2 0x60038
#define _PIPEA_DATA_M2 0x60038
#define PIPE_DATA_M2_OFFSET 0
#define PIPEA_DATA_N2 0x6003c
#define _PIPEA_DATA_N2 0x6003c
#define PIPE_DATA_N2_OFFSET 0
#define PIPEA_LINK_M1 0x60040
#define _PIPEA_LINK_M1 0x60040
#define PIPE_LINK_M1_OFFSET 0
#define PIPEA_LINK_N1 0x60044
#define _PIPEA_LINK_N1 0x60044
#define PIPE_LINK_N1_OFFSET 0
#define PIPEA_LINK_M2 0x60048
#define _PIPEA_LINK_M2 0x60048
#define PIPE_LINK_M2_OFFSET 0
#define PIPEA_LINK_N2 0x6004c
#define _PIPEA_LINK_N2 0x6004c
#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs are same start from 0x61000 */
#define PIPEB_DATA_M1 0x61030
#define PIPEB_DATA_N1 0x61034
#define _PIPEB_DATA_M1 0x61030
#define _PIPEB_DATA_N1 0x61034
#define PIPEB_DATA_M2 0x61038
#define PIPEB_DATA_N2 0x6103c
#define _PIPEB_DATA_M2 0x61038
#define _PIPEB_DATA_N2 0x6103c
#define PIPEB_LINK_M1 0x61040
#define PIPEB_LINK_N1 0x61044
#define _PIPEB_LINK_M1 0x61040
#define _PIPEB_LINK_N1 0x61044
#define PIPEB_LINK_M2 0x61048
#define PIPEB_LINK_N2 0x6104c
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
/* CPU panel fitter */
#define PFA_CTL_1 0x68080
#define PFB_CTL_1 0x68880
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
#define PF_FILTER_EDGE_ENHANCE (2<<23)
#define PF_FILTER_EDGE_SOFTEN (3<<23)
#define PFA_WIN_SZ 0x68074
#define PFB_WIN_SZ 0x68874
#define PFA_WIN_POS 0x68070
#define PFB_WIN_POS 0x68870
#define _PFA_WIN_SZ 0x68074
#define _PFB_WIN_SZ 0x68874
#define _PFA_WIN_POS 0x68070
#define _PFB_WIN_POS 0x68870
#define _PFA_VSCALE 0x68084
#define _PFB_VSCALE 0x68884
#define _PFA_HSCALE 0x68090
#define _PFB_HSCALE 0x68890
#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
/* legacy palette */
#define LGC_PALETTE_A 0x4a000
#define LGC_PALETTE_B 0x4a800
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
@ -2877,17 +2915,17 @@
#define PCH_GMBUS4 0xc5110
#define PCH_GMBUS5 0xc5120
#define PCH_DPLL_A 0xc6014
#define PCH_DPLL_B 0xc6018
#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
#define PCH_FPA0 0xc6040
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define PCH_FPA1 0xc6044
#define PCH_FPB0 0xc6048
#define PCH_FPB1 0xc604c
#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@ -2906,6 +2944,7 @@
#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
#define DREF_SSC4_DOWNSPREAD (0<<6)
#define DREF_SSC4_CENTERSPREAD (1<<6)
#define DREF_SSC1_DISABLE (0<<1)
@ -2938,60 +2977,69 @@
/* transcoder */
#define TRANS_HTOTAL_A 0xe0000
#define _TRANS_HTOTAL_A 0xe0000
#define TRANS_HTOTAL_SHIFT 16
#define TRANS_HACTIVE_SHIFT 0
#define TRANS_HBLANK_A 0xe0004
#define _TRANS_HBLANK_A 0xe0004
#define TRANS_HBLANK_END_SHIFT 16
#define TRANS_HBLANK_START_SHIFT 0
#define TRANS_HSYNC_A 0xe0008
#define _TRANS_HSYNC_A 0xe0008
#define TRANS_HSYNC_END_SHIFT 16
#define TRANS_HSYNC_START_SHIFT 0
#define TRANS_VTOTAL_A 0xe000c
#define _TRANS_VTOTAL_A 0xe000c
#define TRANS_VTOTAL_SHIFT 16
#define TRANS_VACTIVE_SHIFT 0
#define TRANS_VBLANK_A 0xe0010
#define _TRANS_VBLANK_A 0xe0010
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
#define TRANS_VSYNC_A 0xe0014
#define _TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
#define TRANSA_DATA_M1 0xe0030
#define TRANSA_DATA_N1 0xe0034
#define TRANSA_DATA_M2 0xe0038
#define TRANSA_DATA_N2 0xe003c
#define TRANSA_DP_LINK_M1 0xe0040
#define TRANSA_DP_LINK_N1 0xe0044
#define TRANSA_DP_LINK_M2 0xe0048
#define TRANSA_DP_LINK_N2 0xe004c
#define _TRANSA_DATA_M1 0xe0030
#define _TRANSA_DATA_N1 0xe0034
#define _TRANSA_DATA_M2 0xe0038
#define _TRANSA_DATA_N2 0xe003c
#define _TRANSA_DP_LINK_M1 0xe0040
#define _TRANSA_DP_LINK_N1 0xe0044
#define _TRANSA_DP_LINK_M2 0xe0048
#define _TRANSA_DP_LINK_N2 0xe004c
#define TRANS_HTOTAL_B 0xe1000
#define TRANS_HBLANK_B 0xe1004
#define TRANS_HSYNC_B 0xe1008
#define TRANS_VTOTAL_B 0xe100c
#define TRANS_VBLANK_B 0xe1010
#define TRANS_VSYNC_B 0xe1014
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
#define _TRANS_VTOTAL_B 0xe100c
#define _TRANS_VBLANK_B 0xe1010
#define _TRANS_VSYNC_B 0xe1014
#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B)
#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
#define TRANSB_DATA_M1 0xe1030
#define TRANSB_DATA_N1 0xe1034
#define TRANSB_DATA_M2 0xe1038
#define TRANSB_DATA_N2 0xe103c
#define TRANSB_DP_LINK_M1 0xe1040
#define TRANSB_DP_LINK_N1 0xe1044
#define TRANSB_DP_LINK_M2 0xe1048
#define TRANSB_DP_LINK_N2 0xe104c
#define _TRANSB_DATA_M1 0xe1030
#define _TRANSB_DATA_N1 0xe1034
#define _TRANSB_DATA_M2 0xe1038
#define _TRANSB_DATA_N2 0xe103c
#define _TRANSB_DP_LINK_M1 0xe1040
#define _TRANSB_DP_LINK_N1 0xe1044
#define _TRANSB_DP_LINK_M2 0xe1048
#define _TRANSB_DP_LINK_N2 0xe104c
#define TRANSACONF 0xf0008
#define TRANSBCONF 0xf1008
#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)
#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1)
#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2)
#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2)
#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1)
#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1)
#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2)
#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2)
#define _TRANSACONF 0xf0008
#define _TRANSBCONF 0xf1008
#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF)
#define TRANS_DISABLE (0<<31)
#define TRANS_ENABLE (1<<31)
#define TRANS_STATE_MASK (1<<30)
@ -3009,18 +3057,19 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
#define FDI_RXA_CHICKEN 0xc200c
#define FDI_RXB_CHICKEN 0xc2010
#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
/* CPU: FDI_TX */
#define FDI_TXA_CTL 0x60100
#define FDI_TXB_CTL 0x61100
#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
#define _FDI_TXA_CTL 0x60100
#define _FDI_TXB_CTL 0x61100
#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
#define FDI_TX_DISABLE (0<<31)
#define FDI_TX_ENABLE (1<<31)
#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@ -3060,9 +3109,9 @@
#define FDI_SCRAMBLING_DISABLE (1<<7)
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
#define FDI_RXA_CTL 0xf000c
#define FDI_RXB_CTL 0xf100c
#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
#define _FDI_RXA_CTL 0xf000c
#define _FDI_RXB_CTL 0xf100c
#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
#define FDI_RX_ENABLE (1<<31)
/* train, dp width same as FDI_TX */
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
@@ -3087,15 +3136,15 @@
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
#define FDI_RXA_MISC 0xf0010
#define FDI_RXB_MISC 0xf1010
#define FDI_RXA_TUSIZE1 0xf0030
#define FDI_RXA_TUSIZE2 0xf0038
#define FDI_RXB_TUSIZE1 0xf1030
#define FDI_RXB_TUSIZE2 0xf1038
#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
#define _FDI_RXA_TUSIZE1 0xf0030
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -3110,12 +3159,12 @@
#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
#define FDI_RXA_IIR 0xf0014
#define FDI_RXA_IMR 0xf0018
#define FDI_RXB_IIR 0xf1014
#define FDI_RXB_IMR 0xf1018
#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
#define _FDI_RXA_IIR 0xf0014
#define _FDI_RXA_IMR 0xf0018
#define _FDI_RXB_IIR 0xf1014
#define _FDI_RXB_IMR 0xf1018
#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
@@ -3145,11 +3194,15 @@
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_PIPE_ENABLED(V, P) \
(((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER_A (0)
#define TRANSCODER_B (1 << 30)
#define TRANSCODER_MASK (1 << 30)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3165,6 +3218,9 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
#define HDMI_PIPE_ENABLED(V, P) \
(((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
/* PCH SDVOB multiplex with HDMIB */
#define PCH_SDVOB HDMIB
@@ -3240,6 +3296,7 @@
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
#define TRANS_DP_PORT_SEL_D (2<<29)
#define TRANS_DP_PORT_SEL_NONE (3<<29)
#define TRANS_DP_PORT_SEL_MASK (3<<29)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_ENH_FRAMING (1<<18)
@@ -3290,15 +3347,28 @@
#define GEN6_RP_DOWN_TIMEOUT 0xA010
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
#define GEN6_RP_MEDIA_IS_GFX (1<<8)
#define GEN6_RP_ENABLE (1<<7)
#define GEN6_RP_UP_BUSY_MAX (0x2<<3)
#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0)
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
#define GEN6_RP_CUR_UP_EI 0xA050
#define GEN6_CURICONT_MASK 0xffffff
#define GEN6_RP_CUR_UP 0xA054
#define GEN6_CURBSYTAVG_MASK 0xffffff
#define GEN6_RP_PREV_UP 0xA058
#define GEN6_RP_CUR_DOWN_EI 0xA05C
#define GEN6_CURIAVG_MASK 0xffffff
#define GEN6_RP_CUR_DOWN 0xA060
#define GEN6_RP_PREV_DOWN 0xA064
#define GEN6_RP_UP_EI 0xA068
#define GEN6_RP_DOWN_EI 0xA06C
#define GEN6_RP_IDLE_HYSTERSIS 0xA070
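Note: the per-pipe accessors introduced above (TRANS_HTOTAL(pipe), FDI_RX_CTL(pipe), TRANSCONF(plane) and friends) all go through a small _PIPE() helper defined elsewhere in i915_reg.h. The sketch below shows the assumed shape of that helper, for orientation only; it is not part of this hunk.

/* Sketch: pick the pipe A or pipe B instance of a register by stepping
 * from the A offset by the A/B stride; pipe 0 yields (a), pipe 1 yields (b). */
#define _PIPE(pipe, a, b)	((a) + (pipe)*((b)-(a)))
/* e.g. TRANS_HTOTAL(1) then resolves to _TRANS_HTOTAL_B. */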


@@ -34,11 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
if (HAS_PCH_SPLIT(dev)) {
dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
} else {
dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
}
if (HAS_PCH_SPLIT(dev))
dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
}
@@ -46,7 +45,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
@@ -54,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
return;
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
@@ -68,7 +67,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
@@ -76,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
return;
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
@@ -241,12 +240,12 @@ static void i915_save_modeset_reg(struct drm_device *dev)
return;
/* Cursor state */
dev_priv->saveCURACNTR = I915_READ(CURACNTR);
dev_priv->saveCURAPOS = I915_READ(CURAPOS);
dev_priv->saveCURABASE = I915_READ(CURABASE);
dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
dev_priv->saveCURBPOS = I915_READ(CURBPOS);
dev_priv->saveCURBBASE = I915_READ(CURBBASE);
dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
dev_priv->saveCURABASE = I915_READ(_CURABASE);
dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
@@ -256,118 +255,118 @@ static void i915_save_modeset_reg(struct drm_device *dev)
}
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
dev_priv->saveFPA0 = I915_READ(FPA0);
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
dev_priv->saveFPA0 = I915_READ(_FPA0);
dev_priv->saveFPA1 = I915_READ(_FPA1);
dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1);
dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A);
dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A);
dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A);
dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
}
dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
dev_priv->saveFPB0 = I915_READ(FPB0);
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
dev_priv->saveFPB0 = I915_READ(_FPB0);
dev_priv->saveFPB1 = I915_READ(_FPB1);
dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1);
dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B);
dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B);
dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B);
dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
}
dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
@@ -426,19 +425,19 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
fpa0_reg = PCH_FPA0;
fpb0_reg = PCH_FPB0;
fpa1_reg = PCH_FPA1;
fpb1_reg = PCH_FPB1;
dpll_a_reg = _PCH_DPLL_A;
dpll_b_reg = _PCH_DPLL_B;
fpa0_reg = _PCH_FPA0;
fpb0_reg = _PCH_FPB0;
fpa1_reg = _PCH_FPA1;
fpb1_reg = _PCH_FPB1;
} else {
dpll_a_reg = DPLL_A;
dpll_b_reg = DPLL_B;
fpa0_reg = FPA0;
fpb0_reg = FPB0;
fpa1_reg = FPA1;
fpb1_reg = FPB1;
dpll_a_reg = _DPLL_A;
dpll_b_reg = _DPLL_B;
fpa0_reg = _FPA0;
fpb0_reg = _FPB0;
fpa1_reg = _FPA1;
fpb1_reg = _FPB1;
}
if (HAS_PCH_SPLIT(dev)) {
@@ -461,60 +460,60 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
POSTING_READ(DPLL_A_MD);
I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1);
I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
}
/* Restore plane info */
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
@@ -530,68 +529,68 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
POSTING_READ(DPLL_B_MD);
I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1);
I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
}
/* Restore plane info */
I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
I915_WRITE(CURABASE, dev_priv->saveCURABASE);
I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
@@ -653,14 +652,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveDP_B = I915_READ(DP_B);
dev_priv->saveDP_C = I915_READ(DP_C);
dev_priv->saveDP_D = I915_READ(DP_D);
dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
}
/* FIXME: save TV & SDVO state */
@@ -699,14 +698,14 @@ void i915_restore_display(struct drm_device *dev)
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
}
/* This is only meaningful in non-KMS mode */
@@ -797,9 +796,6 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
i915_save_display(dev);
/* Interrupt state */
@@ -808,8 +804,8 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
dev_priv->saveGTIMR = I915_READ(GTIMR);
dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
dev_priv->saveMCHBAR_RENDER_STANDBY =
I915_READ(RSTDBYCTL);
} else {
@@ -846,9 +842,6 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
i915_restore_display(dev);
/* Interrupt state */
@@ -857,11 +850,11 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
I915_WRITE(GTIMR, dev_priv->saveGTIMR);
I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
} else {
I915_WRITE (IER, dev_priv->saveIER);
I915_WRITE (IMR, dev_priv->saveIMR);
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
}
/* Clock gating state */


@@ -7,6 +7,7 @@
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_ringbuffer.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -16,9 +17,7 @@
/* object tracking */
TRACE_EVENT(i915_gem_object_create,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
@@ -35,33 +34,51 @@ TRACE_EVENT(i915_gem_object_create,
);
TRACE_EVENT(i915_gem_object_bind,
TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
TP_ARGS(obj, gtt_offset, mappable),
TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
TP_ARGS(obj, mappable),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, gtt_offset)
__field(u32, offset)
__field(u32, size)
__field(bool, mappable)
),
TP_fast_assign(
__entry->obj = obj;
__entry->gtt_offset = gtt_offset;
__entry->offset = obj->gtt_space->start;
__entry->size = obj->gtt_space->size;
__entry->mappable = mappable;
),
TP_printk("obj=%p, gtt_offset=%08x%s",
__entry->obj, __entry->gtt_offset,
TP_printk("obj=%p, offset=%08x size=%x%s",
__entry->obj, __entry->offset, __entry->size,
__entry->mappable ? ", mappable" : "")
);
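Note: each TRACE_EVENT() above also generates a matching trace_<name>() call that the driver invokes at the corresponding point; a hedged sketch of how the reworked bind event would be emitted (the call site and variable name are assumptions, not part of this diff):

	/* in the GTT bind path, once obj->gtt_space has been assigned */
	trace_i915_gem_object_bind(obj, map_and_fenceable);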
TRACE_EVENT(i915_gem_object_unbind,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, offset)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
__entry->offset = obj->gtt_space->start;
__entry->size = obj->gtt_space->size;
),
TP_printk("obj=%p, offset=%08x size=%x",
__entry->obj, __entry->offset, __entry->size)
);
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
TP_ARGS(obj, old_read_domains, old_write_domain),
TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
TP_ARGS(obj, old_read, old_write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
@@ -71,19 +88,85 @@ TRACE_EVENT(i915_gem_object_change_domain,
TP_fast_assign(
__entry->obj = obj;
__entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
__entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
__entry->read_domains = obj->base.read_domains | (old_read << 16);
__entry->write_domain = obj->base.write_domain | (old_write << 16);
),
TP_printk("obj=%p, read=%04x, write=%04x",
TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
__entry->obj,
__entry->read_domains, __entry->write_domain)
__entry->read_domains >> 16,
__entry->read_domains & 0xffff,
__entry->write_domain >> 16,
__entry->write_domain & 0xffff)
);
TRACE_EVENT(i915_gem_object_pwrite,
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, offset)
__field(u32, len)
),
TP_fast_assign(
__entry->obj = obj;
__entry->offset = offset;
__entry->len = len;
),
TP_printk("obj=%p, offset=%u, len=%u",
__entry->obj, __entry->offset, __entry->len)
);
TRACE_EVENT(i915_gem_object_pread,
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, offset)
__field(u32, len)
),
TP_fast_assign(
__entry->obj = obj;
__entry->offset = offset;
__entry->len = len;
),
TP_printk("obj=%p, offset=%u, len=%u",
__entry->obj, __entry->offset, __entry->len)
);
TRACE_EVENT(i915_gem_object_fault,
TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
TP_ARGS(obj, index, gtt, write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, index)
__field(bool, gtt)
__field(bool, write)
),
TP_fast_assign(
__entry->obj = obj;
__entry->index = index;
__entry->gtt = gtt;
__entry->write = write;
),
TP_printk("obj=%p, %s index=%u %s",
__entry->obj,
__entry->gtt ? "GTT" : "CPU",
__entry->index,
__entry->write ? ", writable" : "")
);
DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
@@ -98,150 +181,171 @@ DECLARE_EVENT_CLASS(i915_gem_object,
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
/* batch tracing */
TRACE_EVENT(i915_gem_request_submit,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno),
TRACE_EVENT(i915_gem_evict,
TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
TP_ARGS(dev, size, align, mappable),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, seqno)
),
__field(u32, size)
__field(u32, align)
__field(bool, mappable)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->seqno = seqno;
i915_trace_irq_get(dev, seqno);
),
__entry->size = size;
__entry->align = align;
__entry->mappable = mappable;
),
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
TP_printk("dev=%d, size=%d, align=%d %s",
__entry->dev, __entry->size, __entry->align,
__entry->mappable ? ", mappable" : "")
);
TRACE_EVENT(i915_gem_request_flush,
TP_PROTO(struct drm_device *dev, u32 seqno,
u32 flush_domains, u32 invalidate_domains),
TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
TRACE_EVENT(i915_gem_evict_everything,
TP_PROTO(struct drm_device *dev, bool purgeable),
TP_ARGS(dev, purgeable),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, seqno)
__field(u32, flush_domains)
__field(u32, invalidate_domains)
),
__field(bool, purgeable)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->purgeable = purgeable;
),
TP_printk("dev=%d%s",
__entry->dev,
__entry->purgeable ? ", purgeable only" : "")
);
TRACE_EVENT(i915_gem_ring_dispatch,
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
__entry->flush_domains = flush_domains;
__entry->invalidate_domains = invalidate_domains;
i915_trace_irq_get(ring, seqno);
),
TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
__entry->dev, __entry->seqno,
__entry->flush_domains, __entry->invalidate_domains)
TP_printk("dev=%u, ring=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
);
TRACE_EVENT(i915_gem_ring_flush,
TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
TP_ARGS(ring, invalidate, flush),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, invalidate)
__field(u32, flush)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
),
TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
__entry->dev, __entry->ring,
__entry->invalidate, __entry->flush)
);
DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno),
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
),
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
TP_printk("dev=%u, ring=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno)
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno)
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno)
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno)
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
DECLARE_EVENT_CLASS(i915_ring,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
TP_PROTO(struct intel_ring_buffer *ring),
TP_ARGS(ring),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
),
TP_printk("dev=%u", __entry->dev)
TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev)
TP_PROTO(struct intel_ring_buffer *ring),
TP_ARGS(ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_end,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev)
TP_PROTO(struct intel_ring_buffer *ring),
TP_ARGS(ring)
);
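Note: DECLARE_EVENT_CLASS() records the entry layout and print format once, and each DEFINE_EVENT() stamps out a tracepoint that reuses it. Purely as a hypothetical illustration, one more per-ring event would only need:

DEFINE_EVENT(i915_ring, i915_ring_wait_timeout,	/* hypothetical, not in this diff */
	    TP_PROTO(struct intel_ring_buffer *ring),
	    TP_ARGS(ring)
);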
TRACE_EVENT(i915_flip_request,
@@ -281,26 +385,29 @@ TRACE_EVENT(i915_flip_complete,
);
TRACE_EVENT(i915_reg_rw,
TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
TP_PROTO(bool write, u32 reg, u64 val, int len),
TP_ARGS(cmd, reg, val, len),
TP_ARGS(write, reg, val, len),
TP_STRUCT__entry(
__field(int, cmd)
__field(uint32_t, reg)
__field(uint64_t, val)
__field(int, len)
__field(u64, val)
__field(u32, reg)
__field(u16, write)
__field(u16, len)
),
TP_fast_assign(
__entry->cmd = cmd;
__entry->val = (u64)val;
__entry->reg = reg;
__entry->val = (uint64_t)val;
__entry->write = write;
__entry->len = len;
),
TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
__entry->cmd, __entry->reg, __entry->val, __entry->len)
TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
__entry->write ? "write" : "read",
__entry->reg, __entry->len,
(u32)(__entry->val & 0xffffffff),
(u32)(__entry->val >> 32))
);
#endif /* _I915_TRACE_H_ */
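Note: the reworked i915_reg_rw event above is meant to be fed from the MMIO accessors; the sketch below shows the assumed pattern (the wrapper names and layout are illustrative, not the driver's actual macros):

static inline u32 sketch_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = readl(dev_priv->regs + reg);
	trace_i915_reg_rw(false, reg, val, sizeof(val));	/* a read */
	return val;
}

static inline void sketch_write32(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	trace_i915_reg_rw(true, reg, val, sizeof(val));		/* a write */
	writel(val, dev_priv->regs + reg);
}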


@@ -226,29 +226,49 @@ static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
struct bdb_sdvo_lvds_options *sdvo_lvds_options;
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
int index;
sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
index = i915_vbt_sdvo_panel_type;
if (index == -1) {
struct bdb_sdvo_lvds_options *sdvo_lvds_options;
sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
index = sdvo_lvds_options->panel_type;
}
dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
if (!dvo_timing)
return;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
return;
fill_detail_timing_data(panel_fixed_mode,
dvo_timing + sdvo_lvds_options->panel_type);
fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
return;
DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
}
static int intel_bios_ssc_frequency(struct drm_device *dev,
bool alternate)
{
switch (INTEL_INFO(dev)->gen) {
case 2:
return alternate ? 66 : 48;
case 3:
case 4:
return alternate ? 100 : 96;
default:
return alternate ? 100 : 120;
}
}
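Note: for reference, the frequency table (in MHz) that intel_bios_ssc_frequency() now centralizes, together with the call that replaces the removed per-generation branches below:

	/* gen 2:           alternate ? 66  : 48
	 * gen 3 / gen 4:   alternate ? 100 : 96
	 * gen 5 and later: alternate ? 100 : 120 */
	dev_priv->lvds_ssc_freq =
		intel_bios_ssc_frequency(dev, general->ssc_freq);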
static void
@@ -263,13 +283,8 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->int_tv_support = general->int_tv_support;
dev_priv->int_crt_support = general->int_crt_support;
dev_priv->lvds_use_ssc = general->enable_ssc;
if (IS_I85X(dev))
dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
else if (IS_GEN5(dev) || IS_GEN6(dev))
dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
else
dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
dev_priv->lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
}
}
@@ -553,6 +568,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
/* LFP panel data */
@@ -565,7 +582,11 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* general features */
dev_priv->int_tv_support = 1;
dev_priv->int_crt_support = 1;
dev_priv->lvds_use_ssc = 0;
/* Default to using SSC */
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
/* eDP data */
dev_priv->edp.bpp = 18;


@@ -129,10 +129,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
u32 adpa, dpll_md;
u32 adpa_reg;
if (intel_crtc->pipe == 0)
dpll_md_reg = DPLL_A_MD;
else
dpll_md_reg = DPLL_B_MD;
dpll_md_reg = DPLL_MD(intel_crtc->pipe);
if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
@@ -160,17 +157,16 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
adpa |= PORT_TRANS_A_SEL_CPT;
else
adpa |= ADPA_PIPE_A_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_B_SEL_CPT;
else
adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, 0);
}
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
I915_WRITE(adpa_reg, adpa);
}
@@ -353,21 +349,12 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
DRM_DEBUG_KMS("starting load-detect on CRT\n");
if (pipe == 0) {
bclrpat_reg = BCLRPAT_A;
vtotal_reg = VTOTAL_A;
vblank_reg = VBLANK_A;
vsync_reg = VSYNC_A;
pipeconf_reg = PIPEACONF;
pipe_dsl_reg = PIPEADSL;
} else {
bclrpat_reg = BCLRPAT_B;
vtotal_reg = VTOTAL_B;
vblank_reg = VBLANK_B;
vsync_reg = VSYNC_B;
pipeconf_reg = PIPEBCONF;
pipe_dsl_reg = PIPEBDSL;
}
bclrpat_reg = BCLRPAT(pipe);
vtotal_reg = VTOTAL(pipe);
vblank_reg = VBLANK(pipe);
vsync_reg = VSYNC(pipe);
pipeconf_reg = PIPECONF(pipe);
pipe_dsl_reg = PIPEDSL(pipe);
save_bclrpat = I915_READ(bclrpat_reg);
save_vtotal = I915_READ(vtotal_reg);

File diff suppressed because it is too large


@@ -49,6 +49,7 @@ struct intel_dp {
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int force_audio;
uint32_t color_range;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
@@ -685,6 +686,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4, bpp = 24;
struct intel_dp_m_n m_n;
int pipe = intel_crtc->pipe;
/*
* Find the lane count in the intel_encoder private
@@ -715,39 +717,19 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
mode->clock, adjusted_mode->clock, &m_n);
if (HAS_PCH_SPLIT(dev)) {
if (intel_crtc->pipe == 0) {
I915_WRITE(TRANSA_DATA_M1,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n);
I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m);
I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n);
} else {
I915_WRITE(TRANSB_DATA_M1,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n);
I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m);
I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n);
}
I915_WRITE(TRANSDATA_M1(pipe),
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
} else {
if (intel_crtc->pipe == 0) {
I915_WRITE(PIPEA_GMCH_DATA_M,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
I915_WRITE(PIPEA_GMCH_DATA_N,
m_n.gmch_n);
I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
} else {
I915_WRITE(PIPEB_GMCH_DATA_M,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
I915_WRITE(PIPEB_GMCH_DATA_N,
m_n.gmch_n);
I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
}
I915_WRITE(PIPE_GMCH_DATA_M(pipe),
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
}
}
@@ -760,8 +742,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_dp->DP = (DP_VOLTAGE_0_4 |
DP_PRE_EMPHASIS_0);
intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -813,6 +795,40 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
/*
* If the panel wasn't on, make sure there's not a currently
* active PP sequence before enabling AUX VDD.
*/
if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
msleep(dev_priv->panel_t3);
pp = I915_READ(PCH_PP_CONTROL);
pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
}
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
/* Make sure sequencer is idle before allowing subsequent activity */
msleep(dev_priv->panel_t12);
}
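Note: the intended use of the two VDD helpers is to bracket AUX/DPCD traffic, as the intel_dp_init() hunk later in this file does; a minimal sketch of that calling pattern:

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
				       intel_dp->dpcd, sizeof(intel_dp->dpcd));
	ironlake_edp_panel_vdd_off(intel_dp);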
/* Returns true if the panel was already on when called */
static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
{
@@ -834,11 +850,6 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
/* Ouch. We need to wait here for some panels, like Dell e6510
* https://bugs.freedesktop.org/show_bug.cgi?id=29278i
*/
msleep(300);
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
5000))
DRM_ERROR("panel on wait timed out: 0x%08x\n",
@@ -875,11 +886,6 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
/* Ouch. We need to wait here for some panels, like Dell e6510
* https://bugs.freedesktop.org/show_bug.cgi?id=29278i
*/
msleep(300);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -945,7 +951,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
if (is_edp(intel_dp)) {
ironlake_edp_backlight_off(dev);
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_off(dev);
if (!is_pch_edp(intel_dp))
ironlake_edp_pll_on(encoder);
else
@@ -959,10 +965,15 @@ static void intel_dp_commit(struct drm_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
if (is_edp(intel_dp))
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_start_link_train(intel_dp);
if (is_edp(intel_dp))
if (is_edp(intel_dp)) {
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp);
}
intel_dp_complete_link_train(intel_dp);
@@ -988,9 +999,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
ironlake_edp_pll_off(encoder);
} else {
if (is_edp(intel_dp))
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_on(intel_dp);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_start_link_train(intel_dp);
if (is_edp(intel_dp)) {
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp);
}
intel_dp_complete_link_train(intel_dp);
}
if (is_edp(intel_dp))
@@ -1508,9 +1523,13 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
{
enum drm_connector_status status;
/* Can't disconnect eDP */
if (is_edp(intel_dp))
return connector_status_connected;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
status = intel_panel_detect(intel_dp->base.base.dev);
if (status == connector_status_unknown)
status = connector_status_connected;
return status;
}
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_dp,
@@ -1662,6 +1681,7 @@ intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
@@ -1690,6 +1710,14 @@ intel_dp_set_property(struct drm_connector *connector,
goto done;
}
if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_dp->color_range)
return 0;
intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
goto done;
}
return -EINVAL;
done:
@@ -1809,6 +1837,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_dp->force_audio_property->values[1] = 1;
drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
}
intel_attach_broadcast_rgb_property(connector);
}
void
@@ -1826,6 +1856,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (!intel_dp)
return;
intel_dp->output_reg = output_reg;
intel_dp->dpms_mode = -1;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dp);
@@ -1865,10 +1898,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
intel_dp->output_reg = output_reg;
intel_dp->has_audio = false;
intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
@@ -1906,21 +1935,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
int ret;
bool was_on;
u32 pp_on, pp_div;
was_on = ironlake_edp_panel_on(intel_dp);
pp_on = I915_READ(PCH_PP_ON_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
/* Get T3 & T12 values (note: VESA not bspec terminology) */
dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
dev_priv->panel_t3 /= 10; /* t3 in 100us units */
dev_priv->panel_t12 = pp_div & 0xf;
dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
intel_dp->dpcd,
sizeof(intel_dp->dpcd));
ironlake_edp_panel_vdd_off(intel_dp);
if (ret == sizeof(intel_dp->dpcd)) {
if (intel_dp->dpcd[0] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
} else {
/* if this fails, presume the device is a ghost */
DRM_ERROR("failed to retrieve link info\n");
intel_dp_destroy(&intel_connector->base);
intel_dp_encoder_destroy(&intel_dp->base.base);
return;
}
if (!was_on)
ironlake_edp_panel_off(dev);
}
intel_encoder->hot_plug = intel_dp_hot_plug;


@@ -217,6 +217,13 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
return dev_priv->pipe_to_crtc_mapping[pipe];
}
static inline struct drm_crtc *
intel_get_crtc_for_plane(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return dev_priv->plane_to_crtc_mapping[plane];
}
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
@@ -230,6 +237,8 @@ struct intel_unpin_work {
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
@@ -260,6 +269,7 @@ extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern void intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev);
extern void intel_panel_disable_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
@@ -321,8 +331,7 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
extern void intel_setup_overlay(struct drm_device *dev);
extern void intel_cleanup_overlay(struct drm_device *dev);
extern int intel_overlay_switch_off(struct intel_overlay *overlay,
bool interruptible);
extern int intel_overlay_switch_off(struct intel_overlay *overlay);
extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,


@@ -178,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
int pipe = intel_crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll_reg = DPLL(pipe);
switch (dvo_reg) {
case DVOA:


@@ -41,6 +41,7 @@ struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
int force_audio;
@@ -124,6 +125,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -278,6 +280,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
ret = drm_connector_property_set_value(connector, property, val);
@@ -305,6 +308,14 @@ intel_hdmi_set_property(struct drm_connector *connector,
goto done;
}
if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_hdmi->color_range)
return 0;
intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
goto done;
}
return -EINVAL;
done:
@@ -363,6 +374,8 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->force_audio_property->values[1] = 1;
drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
}
intel_attach_broadcast_rgb_property(connector);
}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)


@@ -384,7 +384,8 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = i | GMBUS_RATE_100KHZ;
/* XXX force bit banging until GMBUS is fully debugged */
bus->force_bit = intel_gpio_create(dev_priv, i);
if (IS_GEN2(dev))
bus->force_bit = intel_gpio_create(dev_priv, i);
}
intel_i2c_reset(dev_priv->dev);


@@ -231,6 +231,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
/* Should never happen!! */
if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
@@ -277,8 +278,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
I915_WRITE(BCLRPAT_A, 0);
I915_WRITE(BCLRPAT_B, 0);
for_each_pipe(pipe)
I915_WRITE(BCLRPAT(pipe), 0);
switch (intel_lvds->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
@@ -474,6 +475,10 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
enum drm_connector_status status = connector_status_connected;
status = intel_panel_detect(dev);
if (status != connector_status_unknown)
return status;
/* ACPI lid methods were generally unreliable in this generation, so
* don't even bother.
*/
@@ -496,7 +501,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return drm_add_edid_modes(connector, intel_lvds->edid);
mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
if (mode == 0)
if (mode == NULL)
return 0;
drm_mode_probed_add(connector, mode);


@@ -80,3 +80,33 @@ int intel_ddc_get_modes(struct drm_connector *connector,
return ret;
}
static const char *broadcast_rgb_names[] = {
"Full",
"Limited 16:235",
};
void
intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
int i;
prop = dev_priv->broadcast_rgb_property;
if (prop == NULL) {
prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
"Broadcast RGB",
ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
dev_priv->broadcast_rgb_property = prop;
}
drm_connector_attach_property(connector, prop, 0);
}
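As an aside, the new "Broadcast RGB" enum is exposed to userspace like any other connector property. A minimal libdrm sketch (device node and connector id below are placeholder assumptions, not part of this series) that switches a connector to the limited 16:235 range:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* placeholder node */
	uint32_t connector_id = 31;			/* placeholder id */
	drmModeConnector *conn;
	int i;

	if (fd < 0)
		return 1;

	conn = drmModeGetConnector(fd, connector_id);
	if (!conn) {
		close(fd);
		return 1;
	}

	for (i = 0; i < conn->count_props; i++) {
		drmModePropertyPtr prop = drmModeGetProperty(fd, conn->props[i]);
		if (!prop)
			continue;
		/* enum value 1 corresponds to "Limited 16:235" above */
		if (!strcmp(prop->name, "Broadcast RGB"))
			drmModeConnectorSetProperty(fd, connector_id,
						    prop->prop_id, 1);
		drmModeFreeProperty(prop);
	}
	drmModeFreeConnector(conn);
	close(fd);
	return 0;
}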

View File

@ -39,6 +39,8 @@
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define ACPI_CLID 0x01ac /* current lid state indicator */
#define ACPI_CDCK 0x01b0 /* current docking state indicator */
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x400
@ -489,6 +491,8 @@ int intel_opregion_setup(struct drm_device *dev)
opregion->header = base;
opregion->vbt = base + OPREGION_VBT_OFFSET;
opregion->lid_state = base + ACPI_CLID;
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");

View File

@ -213,7 +213,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
struct drm_i915_gem_request *request,
bool interruptible,
void (*tail)(struct intel_overlay *))
{
struct drm_device *dev = overlay->dev;
@ -221,16 +220,14 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
ret = i915_add_request(LP_RING(dev_priv), NULL, request);
if (ret) {
kfree(request);
return ret;
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
ret = i915_do_wait_request(dev,
overlay->last_flip_req, true,
LP_RING(dev_priv));
ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
@ -256,7 +253,7 @@ i830_activate_pipe_a(struct drm_device *dev)
return 0;
/* most i8xx have pipe a forced on, so don't trust dpms mode */
if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
return 0;
crtc_funcs = crtc->base.helper_private;
@ -322,7 +319,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
if (pipe_a_quirk)
i830_deactivate_pipe_a(dev);
@ -364,7 +361,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
OUT_RING(flip_addr);
ADVANCE_LP_RING();
ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
ret = i915_add_request(LP_RING(dev_priv), NULL, request);
if (ret) {
kfree(request);
return ret;
@ -401,8 +398,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
}
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay,
bool interruptible)
static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -437,14 +433,13 @@ static int intel_overlay_off(struct intel_overlay *overlay,
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
ADVANCE_LP_RING();
return intel_overlay_do_wait_request(overlay, request, interruptible,
return intel_overlay_do_wait_request(overlay, request,
intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
* We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
bool interruptible)
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@ -453,8 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
if (overlay->last_flip_req == 0)
return 0;
ret = i915_do_wait_request(dev, overlay->last_flip_req,
interruptible, LP_RING(dev_priv));
ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
@ -499,7 +493,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
ret = intel_overlay_do_wait_request(overlay, request, true,
ret = intel_overlay_do_wait_request(overlay, request,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
@ -868,8 +862,7 @@ out_unpin:
return ret;
}
int intel_overlay_switch_off(struct intel_overlay *overlay,
bool interruptible)
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
struct overlay_registers *regs;
struct drm_device *dev = overlay->dev;
@ -878,7 +871,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
return ret;
@ -893,7 +886,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
regs->OCMD = 0;
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_off(overlay, interruptible);
ret = intel_overlay_off(overlay);
if (ret != 0)
return ret;
@ -1135,7 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
ret = intel_overlay_switch_off(overlay, true);
ret = intel_overlay_switch_off(overlay);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
@ -1157,7 +1150,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle));
if (!new_bo) {
if (&new_bo->base == NULL) {
ret = -ENOENT;
goto out_free;
}
@ -1171,13 +1164,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock;
}
ret = intel_overlay_recover_from_interrupt(overlay, true);
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
goto out_unlock;
if (overlay->crtc != crtc) {
struct drm_display_mode *mode = &crtc->base.mode;
ret = intel_overlay_switch_off(overlay, true);
ret = intel_overlay_switch_off(overlay);
if (ret != 0)
goto out_unlock;

View File

@ -280,3 +280,28 @@ void intel_panel_setup_backlight(struct drm_device *dev)
dev_priv->backlight_level = intel_panel_get_backlight(dev);
dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
}
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
#endif
if (i915_panel_ignore_lid)
return i915_panel_ignore_lid > 0 ?
connector_status_connected :
connector_status_disconnected;
/* The opregion lid state on the HP 2540p is wrong at boot up,
* which appears to be either a BIOS or a Linux ACPI fault */
#if 0
/* Assume that the BIOS does not lie through the OpRegion... */
if (dev_priv->opregion.lid_state)
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
#endif
return connector_status_unknown;
}

View File

@ -62,18 +62,9 @@ render_ring_flush(struct intel_ring_buffer *ring,
u32 flush_domains)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
int ret;
#if WATCH_EXEC
DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
invalidate_domains, flush_domains);
#endif
trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
invalidate_domains, flush_domains);
if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
/*
* read/write caches:
@ -122,9 +113,6 @@ render_ring_flush(struct intel_ring_buffer *ring,
(IS_G4X(dev) || IS_GEN5(dev)))
cmd |= MI_INVALIDATE_ISP;
#if WATCH_EXEC
DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
@ -612,7 +600,6 @@ ring_add_request(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
*result = seqno;
return 0;
}
@ -715,11 +702,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
if (IS_I830(dev) || IS_845G(dev)) {
ret = intel_ring_begin(ring, 4);
if (ret)
@ -894,6 +878,10 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
ret = intel_wait_ring_buffer(ring, ring->size - 8);
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
I915_WRITE_CTL(ring, 0);
drm_core_ioremapfree(&ring->map, ring->dev);
@ -950,13 +938,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return 0;
}
trace_i915_ring_wait_begin (dev);
trace_i915_ring_wait_begin(ring);
end = jiffies + 3 * HZ;
do {
ring->head = I915_READ_HEAD(ring);
ring->space = ring_space(ring);
if (ring->space >= n) {
trace_i915_ring_wait_end(dev);
trace_i915_ring_wait_end(ring);
return 0;
}
@ -970,16 +958,20 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end (dev);
trace_i915_ring_wait_end(ring);
return -EBUSY;
}
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords;
int ret;
if (unlikely(atomic_read(&dev_priv->mm.wedged)))
return -EIO;
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))

View File

@ -44,7 +44,7 @@ struct intel_ring_buffer {
RING_BLT = 0x4,
} id;
u32 mmio_base;
void *virtual_start;
void __iomem *virtual_start;
struct drm_device *dev;
struct drm_i915_gem_object *obj;
@ -59,6 +59,7 @@ struct intel_ring_buffer {
u32 irq_refcount;
u32 irq_mask;
u32 irq_seqno; /* last seqno seen at irq time */
u32 trace_irq_seqno;
u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
@ -142,6 +143,26 @@ intel_read_status_page(struct intel_ring_buffer *ring,
return ioread32(ring->status_page.page_addr + reg);
}
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
* MI_STORE_DATA_IMM.
*
* The following dwords have a reserved meaning:
* 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
* 0x04: ring 0 head pointer
* 0x05: ring 1 head pointer (915-class)
* 0x06: ring 2 head pointer (915-class)
* 0x10-0x1b: Context status DWords (GM45)
* 0x1f: Last written status offset. (GM45)
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
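For reference, a trivial hypothetical helper built on the accessor above reads the driver-owned breadcrumb dword the same way READ_BREADCRUMB() does for the LP ring:

static inline u32 example_read_breadcrumb(struct intel_ring_buffer *ring)
{
	/* dword 0x21 sits in the 0x20-0x3ff driver-usage area described above */
	return intel_read_status_page(ring, I915_BREADCRUMB_INDEX);
}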
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
@ -167,6 +188,12 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
ring->trace_irq_seqno = seqno;
}
/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

View File

@ -92,6 +92,12 @@ struct intel_sdvo {
*/
uint16_t attached_output;
/**
* This is used to select the color range of RGB outputs in HDMI mode.
* It is only valid when using TMDS encoding and 8 bit per color mode.
*/
uint32_t color_range;
/**
* This is set if we're going to treat the device as TV-out.
*
@ -585,6 +591,7 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i
{
struct intel_sdvo_get_trained_inputs_response response;
BUILD_BUG_ON(sizeof(response) != 1);
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
&response, sizeof(response)))
return false;
@ -632,6 +639,7 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo
{
struct intel_sdvo_pixel_clock_range clocks;
BUILD_BUG_ON(sizeof(clocks) != 4);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
&clocks, sizeof(clocks)))
@ -699,6 +707,8 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
BUILD_BUG_ON(sizeof(dtd->part1) != 8);
BUILD_BUG_ON(sizeof(dtd->part2) != 8);
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
&dtd->part1, sizeof(dtd->part1)) &&
intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
@ -796,6 +806,7 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
{
struct intel_sdvo_encode encode;
BUILD_BUG_ON(sizeof(encode) != 2);
return intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPP_ENCODE,
&encode, sizeof(encode));
@ -1051,6 +1062,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* Set the SDVO control regs. */
if (INTEL_INFO(dev)->gen >= 4) {
sdvox = 0;
if (intel_sdvo->is_hdmi)
sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@ -1162,6 +1175,7 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
BUILD_BUG_ON(sizeof(*caps) != 8);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_DEVICE_CAPS,
caps, sizeof(*caps)))
@ -1268,33 +1282,9 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
static bool
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
int caps = 0;
if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
caps++;
if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
caps++;
if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
caps++;
if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
caps++;
if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
caps++;
if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
caps++;
if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
caps++;
return (caps > 1);
/* Is there more than one type of output? */
int caps = intel_sdvo->caps.output_flags & 0xf;
return caps & -caps;
}
static struct edid *
@ -1482,7 +1472,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* Note! This is in reply order (see loop in get_tv_modes).
* XXX: all 60Hz refresh?
*/
struct drm_display_mode sdvo_tv_modes[] = {
static const struct drm_display_mode sdvo_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
416, 0, 200, 201, 232, 233, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
@ -1713,6 +1703,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint16_t temp_value;
uint8_t cmd;
int ret;
@ -1742,6 +1733,14 @@ intel_sdvo_set_property(struct drm_connector *connector,
goto done;
}
if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_sdvo->color_range)
return 0;
intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
goto done;
}
#define CHECK_PROPERTY(name, NAME) \
if (intel_sdvo_connector->name == property) { \
if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
@ -2046,6 +2045,9 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
drm_connector_attach_property(&connector->base.base,
connector->force_audio_property, 0);
}
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
intel_attach_broadcast_rgb_property(&connector->base.base);
}
static bool
@ -2268,6 +2270,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_set_target_output(intel_sdvo, type))
return false;
BUILD_BUG_ON(sizeof(format) != 6);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
&format, sizeof(format)))
@ -2474,6 +2477,8 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
uint16_t response;
} enhancements;
BUILD_BUG_ON(sizeof(enhancements) != 2);
enhancements.response = 0;
intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,

View File

@ -1006,6 +1006,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
const struct video_levels *video_levels;
const struct color_conversion *color_conversion;
bool burst_ena;
int pipe = intel_crtc->pipe;
if (!tv_mode)
return; /* can't happen (mode_prepare prevents this) */
@ -1149,14 +1150,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
{
int pipeconf_reg = (intel_crtc->pipe == 0) ?
PIPEACONF : PIPEBCONF;
int dspcntr_reg = (intel_crtc->plane == 0) ?
DSPACNTR : DSPBCNTR;
int pipeconf_reg = PIPECONF(pipe);
int dspcntr_reg = DSPCNTR(pipe);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
int dspbase_reg = (intel_crtc->plane == 0) ?
DSPAADDR : DSPBADDR;
int dspbase_reg = DSPADDR(pipe);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */

View File

@ -703,7 +703,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
static int mga_do_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
int err;
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;

View File

@ -75,10 +75,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
},
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@ -88,15 +84,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver mga_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
};
static int __init mga_init(void)
{
driver.num_ioctls = mga_max_ioctl;
return drm_init(&driver);
return drm_pci_init(&driver, &mga_pci_driver);
}
static void __exit mga_exit(void)
{
drm_exit(&driver);
drm_pci_exit(&driver, &mga_pci_driver);
}
module_init(mga_init);

View File

@ -282,7 +282,7 @@ static void still_alive(void)
{
#if 0
sync();
msleep(2);
mdelay(2);
#endif
}
@ -1904,7 +1904,7 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
BIOSLOG(bios, "0x%04X: "
"Condition not met, sleeping for 20ms\n",
offset);
msleep(20);
mdelay(20);
}
}
@ -1938,7 +1938,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
offset, time);
msleep(time);
mdelay(time);
return 3;
}
@ -2962,7 +2962,7 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (time < 1000)
udelay(time);
else
msleep((time + 900) / 1000);
mdelay((time + 900) / 1000);
return 3;
}
@ -3856,7 +3856,7 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
if (script == LVDS_PANEL_OFF) {
/* off-on delay in ms */
msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
mdelay(ROM16(bios->data[bios->fp.xlated_entry + 7]));
}
#ifdef __powerpc__
/* Powerbook specific quirks */
@ -5950,6 +5950,11 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
}
}
static const u8 hpd_gpio[16] = {
0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
};
static void
parse_dcb_connector_table(struct nvbios *bios)
{
@ -5986,23 +5991,9 @@ parse_dcb_connector_table(struct nvbios *bios)
cte->type = (cte->entry & 0x000000ff) >> 0;
cte->index2 = (cte->entry & 0x00000f00) >> 8;
switch (cte->entry & 0x00033000) {
case 0x00001000:
cte->gpio_tag = 0x07;
break;
case 0x00002000:
cte->gpio_tag = 0x08;
break;
case 0x00010000:
cte->gpio_tag = 0x51;
break;
case 0x00020000:
cte->gpio_tag = 0x52;
break;
default:
cte->gpio_tag = 0xff;
break;
}
cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
cte->gpio_tag = hpd_gpio[cte->gpio_tag];
if (cte->type == 0xff)
continue;
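A quick standalone check (plain userspace C, not part of the patch) confirms the ffs()-based table lookup reproduces the cases the old switch handled, e.g. an entry with bit 0x00002000 set maps to GPIO tag 0x08 and an entry with none of the relevant bits set falls back to 0xff:

#include <stdio.h>
#include <strings.h>	/* ffs() */

static const unsigned char hpd_gpio[16] = {
	0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
};

int main(void)
{
	unsigned int entries[] = { 0x00001000, 0x00002000, 0x00010000,
				   0x00020000, 0x00000000 };
	unsigned int i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		unsigned int idx = ffs((entries[i] & 0x07033000) >> 12);
		/* prints 0x07 0x08 0x51 0x52 0xff, matching the old switch */
		printf("entry 0x%08x -> gpio_tag 0x%02x\n",
		       entries[i], hpd_gpio[idx]);
	}
	return 0;
}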
@ -6702,11 +6693,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct nvbios *bios = &dev_priv->vbios;
struct init_exec iexec = { true, false };
mutex_lock(&bios->lock);
spin_lock_bh(&bios->lock);
bios->display.output = dcbent;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
mutex_unlock(&bios->lock);
spin_unlock_bh(&bios->lock);
}
static bool NVInitVBIOS(struct drm_device *dev)
@ -6715,7 +6706,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
struct nvbios *bios = &dev_priv->vbios;
memset(bios, 0, sizeof(struct nvbios));
mutex_init(&bios->lock);
spin_lock_init(&bios->lock);
bios->dev = dev;
if (!NVShadowVBIOS(dev, bios->data))

View File

@ -251,7 +251,7 @@ struct nvbios {
uint8_t digital_min_front_porch;
bool fp_no_ddc;
struct mutex lock;
spinlock_t lock;
uint8_t data[NV_PROM_SIZE];
unsigned int length;

View File

@ -57,8 +57,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
}
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
int *page_shift)
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int *align, int *size, int *page_shift)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
@ -83,7 +83,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
}
} else {
if (likely(dev_priv->chan_vm)) {
if (*size > 256 * 1024)
if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
*page_shift = dev_priv->chan_vm->lpg_shift;
else
*page_shift = dev_priv->chan_vm->spg_shift;
@ -101,8 +101,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
int size, int align, uint32_t flags, uint32_t tile_mode,
uint32_t tile_flags, bool no_vm, bool mappable,
struct nouveau_bo **pnvbo)
uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
@ -113,16 +112,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
return -ENOMEM;
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
nvbo->mappable = mappable;
nvbo->no_vm = no_vm;
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
align >>= PAGE_SHIFT;
if (!nvbo->no_vm && dev_priv->chan_vm) {
if (dev_priv->chan_vm) {
ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
NV_MEM_ACCESS_RW, &nvbo->vma);
if (ret) {
@ -144,11 +141,8 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
nvbo->channel = NULL;
if (nvbo->vma.node) {
if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
nvbo->bo.offset = nvbo->vma.offset;
}
if (nvbo->vma.node)
nvbo->bo.offset = nvbo->vma.offset;
*pnvbo = nvbo;
return 0;
}
@ -318,11 +312,8 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
if (nvbo->vma.node) {
if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
nvbo->bo.offset = nvbo->vma.offset;
}
if (nvbo->vma.node)
nvbo->bo.offset = nvbo->vma.offset;
return 0;
}
@ -385,7 +376,8 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
case NOUVEAU_GART_AGP:
return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
case NOUVEAU_GART_SGDMA:
case NOUVEAU_GART_PDMA:
case NOUVEAU_GART_HW:
return nouveau_sgdma_init_ttm(dev);
default:
NV_ERROR(dev, "Unknown GART type %d\n",
@ -431,7 +423,10 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_TT:
man->func = &ttm_bo_manager_func;
if (dev_priv->card_type >= NV_50)
man->func = &nouveau_gart_manager;
else
man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@ -439,7 +434,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
break;
case NOUVEAU_GART_SGDMA:
case NOUVEAU_GART_PDMA:
case NOUVEAU_GART_HW:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
@ -501,45 +497,22 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
}
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
if (nvbo->no_vm) {
if (mem->mem_type == TTM_PL_TT)
return NvDmaGART;
return NvDmaVRAM;
}
if (mem->mem_type == TTM_PL_TT)
return chan->gart_handle;
return chan->vram_handle;
}
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_mem *old_node = old_mem->mm_node;
struct nouveau_mem *new_node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 src_offset = old_mem->start << PAGE_SHIFT;
u64 dst_offset = new_mem->start << PAGE_SHIFT;
u32 page_count = new_mem->num_pages;
u64 src_offset, dst_offset;
int ret;
if (!nvbo->no_vm) {
if (old_mem->mem_type == TTM_PL_VRAM)
src_offset = nvbo->vma.offset;
else
src_offset += dev_priv->gart_info.aper_base;
if (new_mem->mem_type == TTM_PL_VRAM)
dst_offset = nvbo->vma.offset;
else
dst_offset += dev_priv->gart_info.aper_base;
}
src_offset = old_node->tmp_vma.offset;
if (new_node->tmp_vma.node)
dst_offset = new_node->tmp_vma.offset;
else
dst_offset = nvbo->vma.offset;
page_count = new_mem->num_pages;
while (page_count) {
@ -574,33 +547,18 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_mem *old_node = old_mem->mm_node;
struct nouveau_mem *new_node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
u64 src_offset, dst_offset;
int ret;
src_offset = old_mem->start << PAGE_SHIFT;
dst_offset = new_mem->start << PAGE_SHIFT;
if (!nvbo->no_vm) {
if (old_mem->mem_type == TTM_PL_VRAM)
src_offset = nvbo->vma.offset;
else
src_offset += dev_priv->gart_info.aper_base;
if (new_mem->mem_type == TTM_PL_VRAM)
dst_offset = nvbo->vma.offset;
else
dst_offset += dev_priv->gart_info.aper_base;
}
ret = RING_SPACE(chan, 3);
if (ret)
return ret;
BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
src_offset = old_node->tmp_vma.offset;
if (new_node->tmp_vma.node)
dst_offset = new_node->tmp_vma.offset;
else
dst_offset = nvbo->vma.offset;
while (length) {
u32 amount, stride, height;
@ -681,6 +639,15 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
return 0;
}
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
if (mem->mem_type == TTM_PL_TT)
return chan->gart_handle;
return chan->vram_handle;
}
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
@ -734,15 +701,43 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct ttm_mem_reg *old_mem = &bo->mem;
struct nouveau_channel *chan;
int ret;
chan = nvbo->channel;
if (!chan || nvbo->no_vm) {
if (!chan) {
chan = dev_priv->channel;
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
}
/* create temporary vma for old memory, this will get cleaned
* up after ttm destroys the ttm_mem_reg
*/
if (dev_priv->card_type >= NV_50) {
struct nouveau_mem *node = old_mem->mm_node;
if (!node->tmp_vma.node) {
u32 page_shift = nvbo->vma.node->type;
if (old_mem->mem_type == TTM_PL_TT)
page_shift = nvbo->vma.vm->spg_shift;
ret = nouveau_vm_get(chan->vm,
old_mem->num_pages << PAGE_SHIFT,
page_shift, NV_MEM_ACCESS_RO,
&node->tmp_vma);
if (ret)
goto out;
}
if (old_mem->mem_type == TTM_PL_VRAM)
nouveau_vm_map(&node->tmp_vma, node);
else {
nouveau_vm_map_sg(&node->tmp_vma, 0,
old_mem->num_pages << PAGE_SHIFT,
node, node->pages);
}
}
if (dev_priv->card_type < NV_50)
ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
else
@ -756,6 +751,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
no_wait_gpu, new_mem);
}
out:
if (chan == dev_priv->channel)
mutex_unlock(&chan->mutex);
return ret;
@ -766,6 +762,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
@ -785,7 +782,23 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
if (dev_priv->card_type >= NV_50) {
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_mem *node = tmp_mem.mm_node;
struct nouveau_vma *vma = &nvbo->vma;
if (vma->node->type != vma->vm->spg_shift)
vma = &node->tmp_vma;
nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
node, node->pages);
}
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (dev_priv->card_type >= NV_50) {
struct nouveau_bo *nvbo = nouveau_bo(bo);
nouveau_vm_unmap(&nvbo->vma);
}
if (ret)
goto out;
@ -828,6 +841,36 @@ out:
return ret;
}
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_mem *node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_vma *vma = &nvbo->vma;
struct nouveau_vm *vm = vma->vm;
if (dev_priv->card_type < NV_50)
return;
switch (new_mem->mem_type) {
case TTM_PL_VRAM:
nouveau_vm_map(vma, node);
break;
case TTM_PL_TT:
if (vma->node->type != vm->spg_shift) {
nouveau_vm_unmap(vma);
vma = &node->tmp_vma;
}
nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
node, node->pages);
break;
default:
nouveau_vm_unmap(&nvbo->vma);
break;
}
}
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct nouveau_tile_reg **new_tile)
@ -835,19 +878,13 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
uint64_t offset;
u64 offset = new_mem->start << PAGE_SHIFT;
if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
/* Nothing to do. */
*new_tile = NULL;
*new_tile = NULL;
if (new_mem->mem_type != TTM_PL_VRAM)
return 0;
}
offset = new_mem->start << PAGE_SHIFT;
if (dev_priv->chan_vm) {
nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
} else if (dev_priv->card_type >= NV_10) {
if (dev_priv->card_type >= NV_10) {
*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,
nvbo->tile_flags);
@ -864,11 +901,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
if (dev_priv->card_type >= NV_10 &&
dev_priv->card_type < NV_50) {
nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
*old_tile = new_tile;
}
nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
*old_tile = new_tile;
}
static int
@ -882,9 +916,11 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_tile_reg *new_tile = NULL;
int ret = 0;
ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
if (ret)
return ret;
if (dev_priv->card_type < NV_50) {
ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
if (ret)
return ret;
}
/* Fake bo copy. */
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
@ -915,10 +951,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (ret)
nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
else
nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
if (dev_priv->card_type < NV_50) {
if (ret)
nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
else
nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
}
return ret;
}
@ -959,7 +997,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
break;
case TTM_PL_VRAM:
{
struct nouveau_vram *vram = mem->mm_node;
struct nouveau_mem *node = mem->mm_node;
u8 page_shift;
if (!dev_priv->bar1_vm) {
@ -970,23 +1008,23 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
}
if (dev_priv->card_type == NV_C0)
page_shift = vram->page_shift;
page_shift = node->page_shift;
else
page_shift = 12;
ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
page_shift, NV_MEM_ACCESS_RW,
&vram->bar_vma);
&node->bar_vma);
if (ret)
return ret;
nouveau_vm_map(&vram->bar_vma, vram);
nouveau_vm_map(&node->bar_vma, node);
if (ret) {
nouveau_vm_put(&vram->bar_vma);
nouveau_vm_put(&node->bar_vma);
return ret;
}
mem->bus.offset = vram->bar_vma.offset;
mem->bus.offset = node->bar_vma.offset;
if (dev_priv->card_type == NV_50) /*XXX*/
mem->bus.offset -= 0x0020000000ULL;
mem->bus.base = pci_resource_start(dev->pdev, 1);
@ -1003,16 +1041,16 @@ static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct nouveau_vram *vram = mem->mm_node;
struct nouveau_mem *node = mem->mm_node;
if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
return;
if (!vram->bar_vma.node)
if (!node->bar_vma.node)
return;
nouveau_vm_unmap(&vram->bar_vma);
nouveau_vm_put(&vram->bar_vma);
nouveau_vm_unmap(&node->bar_vma);
nouveau_vm_put(&node->bar_vma);
}
static int
@ -1062,6 +1100,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
.move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
.sync_obj_signaled = __nouveau_fence_signalled,

View File

@ -35,7 +35,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *pb = chan->pushbuf_bo;
struct nouveau_gpuobj *pushbuf = NULL;
int ret;
int ret = 0;
if (dev_priv->card_type >= NV_50) {
if (dev_priv->card_type < NV_C0) {
@ -90,8 +90,7 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
else
location = TTM_PL_FLAG_TT;
ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
true, &pushbuf);
ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
if (ret) {
NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
return NULL;

View File

@ -32,6 +32,7 @@
#include "nouveau_hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
#include "nv50_display.h"
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@ -61,18 +62,59 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
};
int
nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
nouveau_framebuffer_init(struct drm_device *dev,
struct nouveau_framebuffer *nv_fb,
struct drm_mode_fb_cmd *mode_cmd,
struct nouveau_bo *nvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = &nv_fb->base;
int ret;
ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
if (ret) {
return ret;
}
drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
nouveau_fb->nvbo = nvbo;
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
nv_fb->nvbo = nvbo;
if (dev_priv->card_type >= NV_50) {
u32 tile_flags = nouveau_bo_tile_layout(nvbo);
if (tile_flags == 0x7a00 ||
tile_flags == 0xfe00)
nv_fb->r_dma = NvEvoFB32;
else
if (tile_flags == 0x7000)
nv_fb->r_dma = NvEvoFB16;
else
nv_fb->r_dma = NvEvoVRAM_LP;
switch (fb->depth) {
case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
case 24:
case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
default:
NV_ERROR(dev, "unknown depth %d\n", fb->depth);
return -EINVAL;
}
if (dev_priv->chipset == 0x50)
nv_fb->r_format |= (tile_flags << 8);
if (!tile_flags)
nv_fb->r_pitch = 0x00100000 | fb->pitch;
else {
u32 mode = nvbo->tile_mode;
if (dev_priv->card_type >= NV_C0)
mode >>= 4;
nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
}
}
return 0;
}
@ -182,6 +224,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_page_flip_state *s,
struct nouveau_fence **pfence)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
unsigned long flags;
int ret;
@ -201,9 +244,12 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
OUT_RING(chan, 0);
FIRE_RING(chan);
if (dev_priv->card_type < NV_C0)
BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
else
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0500, 1);
OUT_RING (chan, 0);
FIRE_RING (chan);
ret = nouveau_fence_new(chan, pfence, true);
if (ret)
@ -244,7 +290,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
{ { }, s->event, nouveau_crtc(crtc)->index,
{ { }, event, nouveau_crtc(crtc)->index,
fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
new_bo->bo.offset };
@ -255,6 +301,14 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
mutex_lock(&chan->mutex);
/* Emit a page flip */
if (dev_priv->card_type >= NV_50) {
ret = nv50_display_flip_next(crtc, fb, chan);
if (ret) {
nouveau_channel_put(&chan);
goto fail_unreserve;
}
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
nouveau_channel_put(&chan);
if (ret)
@ -305,7 +359,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
}
list_del(&s->head);
*ps = *s;
if (ps)
*ps = *s;
kfree(s);
spin_unlock_irqrestore(&dev->event_lock, flags);

View File

@ -97,13 +97,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
OUT_RING(chan, 0);
/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
ret = RING_SPACE(chan, 4);
ret = RING_SPACE(chan, 6);
if (ret)
return ret;
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
OUT_RING(chan, NvM2MF);
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
OUT_RING(chan, NvNotify0);
OUT_RING (chan, NvM2MF);
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
OUT_RING (chan, NvNotify0);
OUT_RING (chan, chan->vram_handle);
OUT_RING (chan, chan->gart_handle);
/* Sit back and pray the channel works.. */
FIRE_RING(chan);

View File

@ -61,8 +61,6 @@ enum {
NvM2MF = 0x80000001,
NvDmaFB = 0x80000002,
NvDmaTT = 0x80000003,
NvDmaVRAM = 0x80000004,
NvDmaGART = 0x80000005,
NvNotify0 = 0x80000006,
Nv2D = 0x80000007,
NvCtxSurf2D = 0x80000008,
@ -73,12 +71,15 @@ enum {
NvImageBlit = 0x8000000d,
NvSw = 0x8000000e,
NvSema = 0x8000000f,
NvEvoSema0 = 0x80000010,
NvEvoSema1 = 0x80000011,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
NvEvoFB16 = 0x01000001,
NvEvoFB32 = 0x01000002,
NvEvoVRAM_LP = 0x01000003
NvEvoVRAM_LP = 0x01000003,
NvEvoSync = 0xcafe0000
};
#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039

View File

@ -175,7 +175,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct bit_displayport_encoder_table_entry *dpse;
struct bit_displayport_encoder_table *dpe;
int ret, i, dpe_headerlen, vs = 0, pre = 0;
uint8_t request[2];
@ -183,7 +182,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
if (!dpe)
return false;
dpse = (void *)((char *)dpe + dpe_headerlen);
ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
if (ret)

View File

@ -408,14 +408,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = nouveau_pci_probe,
.remove = nouveau_pci_remove,
.suspend = nouveau_pci_suspend,
.resume = nouveau_pci_resume
},
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
@ -432,6 +424,15 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver nouveau_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = nouveau_pci_probe,
.remove = nouveau_pci_remove,
.suspend = nouveau_pci_suspend,
.resume = nouveau_pci_resume
};
static int __init nouveau_init(void)
{
driver.num_ioctls = nouveau_max_ioctl;
@ -449,7 +450,7 @@ static int __init nouveau_init(void)
return 0;
nouveau_register_dsm_handler();
return drm_init(&driver);
return drm_pci_init(&driver, &nouveau_pci_driver);
}
static void __exit nouveau_exit(void)
@ -457,7 +458,7 @@ static void __exit nouveau_exit(void)
if (!nouveau_modeset)
return;
drm_exit(&driver);
drm_pci_exit(&driver, &nouveau_pci_driver);
nouveau_unregister_dsm_handler();
}

View File

@ -57,7 +57,7 @@ struct nouveau_fpriv {
#include "nouveau_util.h"
struct nouveau_grctx;
struct nouveau_vram;
struct nouveau_mem;
#include "nouveau_vm.h"
#define MAX_NUM_DCB_ENTRIES 16
@ -65,13 +65,16 @@ struct nouveau_vram;
#define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15
struct nouveau_vram {
struct nouveau_mem {
struct drm_device *dev;
struct nouveau_vma bar_vma;
struct nouveau_vma tmp_vma;
u8 page_shift;
struct drm_mm_node *tag;
struct list_head regions;
dma_addr_t *pages;
u32 memtype;
u64 offset;
u64 size;
@ -90,6 +93,7 @@ struct nouveau_tile_reg {
struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
u32 valid_domains;
u32 placements[3];
u32 busy_placements[3];
struct ttm_bo_kmap_obj kmap;
@ -104,8 +108,6 @@ struct nouveau_bo {
struct nouveau_channel *channel;
struct nouveau_vma vma;
bool mappable;
bool no_vm;
uint32_t tile_mode;
uint32_t tile_flags;
@ -387,6 +389,7 @@ struct nouveau_pgraph_engine {
};
struct nouveau_display_engine {
void *priv;
int (*early_init)(struct drm_device *);
void (*late_takedown)(struct drm_device *);
int (*create)(struct drm_device *);
@ -509,8 +512,8 @@ struct nouveau_crypt_engine {
struct nouveau_vram_engine {
int (*init)(struct drm_device *);
int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
u32 type, struct nouveau_vram **);
void (*put)(struct drm_device *, struct nouveau_vram **);
u32 type, struct nouveau_mem **);
void (*put)(struct drm_device *, struct nouveau_mem **);
bool (*flags_valid)(struct drm_device *, u32 tile_flags);
};
@ -652,8 +655,6 @@ struct drm_nouveau_private {
/* interrupt handling */
void (*irq_handler[32])(struct drm_device *);
bool msi_enabled;
struct workqueue_struct *wq;
struct work_struct irq_work;
struct list_head vbl_waiting;
@ -691,15 +692,22 @@ struct drm_nouveau_private {
struct {
enum {
NOUVEAU_GART_NONE = 0,
NOUVEAU_GART_AGP,
NOUVEAU_GART_SGDMA
NOUVEAU_GART_AGP, /* AGP */
NOUVEAU_GART_PDMA, /* paged dma object */
NOUVEAU_GART_HW /* on-chip gart/vm */
} type;
uint64_t aper_base;
uint64_t aper_size;
uint64_t aper_free;
struct ttm_backend_func *func;
struct {
struct page *page;
dma_addr_t addr;
} dummy;
struct nouveau_gpuobj *sg_ctxdma;
struct nouveau_vma vma;
} gart_info;
/* nv10-nv40 tiling regions */
@ -740,14 +748,6 @@ struct drm_nouveau_private {
struct backlight_device *backlight;
struct nouveau_channel *evo;
u32 evo_alloc;
struct {
struct dcb_entry *dcb;
u16 script;
u32 pclk;
} evo_irq;
struct {
struct dentry *channel_root;
} debugfs;
@ -847,6 +847,7 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
struct nouveau_tile_reg *tile,
struct nouveau_fence *fence);
extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
@ -1076,7 +1077,7 @@ extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
extern void nv50_fb_vm_trap(struct drm_device *, int display, const char *);
extern void nv50_fb_vm_trap(struct drm_device *, int display);
/* nvc0_fb.c */
extern int nvc0_fb_init(struct drm_device *);
@ -1295,7 +1296,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
int size, int align, uint32_t flags,
uint32_t tile_mode, uint32_t tile_flags,
bool no_vm, bool mappable, struct nouveau_bo **);
struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
@ -1356,9 +1357,9 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
int size, int align, uint32_t flags,
int size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
bool no_vm, bool mappable, struct nouveau_bo **);
struct nouveau_bo **);
extern int nouveau_gem_object_new(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
extern int nouveau_gem_ioctl_new(struct drm_device *, void *,

View File

@ -30,6 +30,9 @@
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
u32 r_dma;
u32 r_format;
u32 r_pitch;
};
static inline struct nouveau_framebuffer *

View File

@ -296,8 +296,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
0, 0x0000, false, true, &nvbo);
ret = nouveau_gem_new(dev, dev_priv->channel, size, 0,
NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(dev, "failed to allocate framebuffer\n");
goto out;

View File

@ -27,13 +27,15 @@
#include "drmP.h"
#include "drm.h"
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_dma.h"
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
nouveau_private(dev)->card_type < NV_C0)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
struct nouveau_fence {
struct nouveau_channel *channel;
@ -230,7 +232,8 @@ int
__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
unsigned long timeout = jiffies + (3 * DRM_HZ);
unsigned long sleep_time = jiffies + 1;
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
ktime_t t;
int ret = 0;
while (1) {
@ -244,8 +247,13 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
__set_current_state(intr ? TASK_INTERRUPTIBLE
: TASK_UNINTERRUPTIBLE);
if (lazy && time_after_eq(jiffies, sleep_time))
schedule_timeout(1);
if (lazy) {
t = ktime_set(0, sleep_time);
schedule_hrtimeout(&t, HRTIMER_MODE_REL);
sleep_time *= 2;
if (sleep_time > NSEC_PER_MSEC)
sleep_time = NSEC_PER_MSEC;
}
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
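The back-off above starts the lazy sleep at 1us and doubles it up to a 1ms cap, so short-lived fences are noticed quickly while long waits stop hammering the scheduler. A toy sketch of the resulting schedule (ordinary userspace C, assumptions only, not part of the patch):

#include <stdio.h>

#define NSEC_PER_MSEC 1000000LL

int main(void)
{
	long long sleep_ns = NSEC_PER_MSEC / 1000;	/* 1us, as in the patch */
	long long waited_ns = 0;
	int wakeups = 0;

	/* walk the back-off until the 3 second fence timeout would expire */
	while (waited_ns < 3000 * NSEC_PER_MSEC) {
		waited_ns += sleep_ns;
		wakeups++;
		sleep_ns *= 2;
		if (sleep_ns > NSEC_PER_MSEC)
			sleep_ns = NSEC_PER_MSEC;
	}
	printf("sleeps before timeout: %d (the first few are us-scale)\n", wakeups);
	return 0;
}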
@ -259,11 +267,12 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
}
static struct nouveau_semaphore *
alloc_semaphore(struct drm_device *dev)
semaphore_alloc(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_semaphore *sema;
int ret;
int size = (dev_priv->chipset < 0x84) ? 4 : 16;
int ret, i;
if (!USE_SEMA(dev))
return NULL;
@ -277,9 +286,9 @@ alloc_semaphore(struct drm_device *dev)
goto fail;
spin_lock(&dev_priv->fence.lock);
sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
if (sema->mem)
sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
spin_unlock(&dev_priv->fence.lock);
if (!sema->mem)
@ -287,7 +296,8 @@ alloc_semaphore(struct drm_device *dev)
kref_init(&sema->ref);
sema->dev = dev;
nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
return sema;
fail:
@ -296,7 +306,7 @@ fail:
}
static void
free_semaphore(struct kref *ref)
semaphore_free(struct kref *ref)
{
struct nouveau_semaphore *sema =
container_of(ref, struct nouveau_semaphore, ref);
@ -318,61 +328,51 @@ semaphore_work(void *priv, bool signalled)
if (unlikely(!signalled))
nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
kref_put(&sema->ref, free_semaphore);
kref_put(&sema->ref, semaphore_free);
}
static int
emit_semaphore(struct nouveau_channel *chan, int method,
struct nouveau_semaphore *sema)
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
struct nouveau_fence *fence;
bool smart = (dev_priv->card_type >= NV_50);
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
int ret;
ret = RING_SPACE(chan, smart ? 8 : 4);
if (ret)
return ret;
if (dev_priv->chipset < 0x84) {
ret = RING_SPACE(chan, 3);
if (ret)
return ret;
if (smart) {
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING(chan, NvSema);
}
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
OUT_RING(chan, sema->mem->start);
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2);
OUT_RING (chan, sema->mem->start);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
/*
* NV50 tries to be too smart and context-switch
* between semaphores instead of doing a "first come,
* first served" strategy like previous cards
* do.
*
* That's bad because the ACQUIRE latency can get as
* large as the PFIFO context time slice in the
* typical DRI2 case where you have several
* outstanding semaphores at the same moment.
*
* If we're going to ACQUIRE, force the card to
* context switch before, just in case the matching
* RELEASE is already scheduled to be executed in
* another channel.
*/
BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
OUT_RING(chan, 0);
}
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, method, 1);
OUT_RING(chan, 1);
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 1); /* ACQUIRE_EQ */
} else {
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
/*
* Force the card to context switch, there may be
* another channel waiting for the semaphore we just
* released.
*/
BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
OUT_RING(chan, 0);
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
}
/* Delay semaphore destruction until its work is done */
@ -383,7 +383,62 @@ emit_semaphore(struct nouveau_channel *chan, int method,
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
return 0;
}
static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
int ret;
if (dev_priv->chipset < 0x84) {
ret = RING_SPACE(chan, 4);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
OUT_RING (chan, sema->mem->start);
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 2); /* RELEASE */
} else {
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 0x1002); /* RELEASE */
}
/* Delay semaphore destruction until its work is done */
ret = nouveau_fence_new(chan, &fence, true);
if (ret)
return ret;
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
return 0;
}
@ -400,7 +455,7 @@ nouveau_fence_sync(struct nouveau_fence *fence,
nouveau_fence_signalled(fence)))
goto out;
sema = alloc_semaphore(dev);
sema = semaphore_alloc(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
@ -418,17 +473,17 @@ nouveau_fence_sync(struct nouveau_fence *fence,
}
/* Make wchan wait until it gets signalled */
ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
ret = semaphore_acquire(wchan, sema);
if (ret)
goto out_unlock;
/* Signal the semaphore from chan */
ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
ret = semaphore_release(chan, sema);
out_unlock:
mutex_unlock(&chan->mutex);
out_unref:
kref_put(&sema->ref, free_semaphore);
kref_put(&sema->ref, semaphore_free);
out:
if (chan)
nouveau_channel_put_unlocked(&chan);
@ -449,22 +504,23 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
struct nouveau_gpuobj *obj = NULL;
int ret;
if (dev_priv->card_type >= NV_C0)
goto out_initialised;
/* Create an NV_SW object for various sync purposes */
ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
/* we leave subchannel empty for nvc0 */
if (dev_priv->card_type < NV_C0) {
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, 0, 1);
OUT_RING(chan, NvSw);
}
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, 0, 1);
OUT_RING(chan, NvSw);
/* Create a DMA object for the shared cross-channel sync area. */
if (USE_SEMA(dev)) {
if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
@ -484,14 +540,20 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING(chan, NvSema);
} else {
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle); /* whole VM */
}
FIRE_RING(chan);
out_initialised:
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
atomic_set(&chan->fence.last_sequence_irq, 0);
return 0;
}
@ -519,12 +581,13 @@ int
nouveau_fence_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
int ret;
/* Create a shared VRAM heap for cross-channel sync. */
if (USE_SEMA(dev)) {
ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
0, 0, false, true, &dev_priv->fence.bo);
ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
0, 0, &dev_priv->fence.bo);
if (ret)
return ret;

View File

@ -61,19 +61,36 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
int size, int align, uint32_t flags, uint32_t tile_mode,
uint32_t tile_flags, bool no_vm, bool mappable,
struct nouveau_bo **pnvbo)
int size, int align, uint32_t domain, uint32_t tile_mode,
uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
u32 flags = 0;
int ret;
if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
flags |= TTM_PL_FLAG_VRAM;
if (domain & NOUVEAU_GEM_DOMAIN_GART)
flags |= TTM_PL_FLAG_TT;
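/* nothing requested, or an explicit CPU domain: fall back to placing the
 * object in system memory
 */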
if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
tile_flags, no_vm, mappable, pnvbo);
tile_flags, pnvbo);
if (ret)
return ret;
nvbo = *pnvbo;
/* we restrict allowed domains on nv50+ to only the types
* that were requested at creation time. not possible on
* earlier chips without busting the ABI.
*/
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
if (dev_priv->card_type >= NV_50)
nvbo->valid_domains &= domain;
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
nouveau_bo_ref(NULL, pnvbo);
@ -97,7 +114,7 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->offset = nvbo->bo.offset;
rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
rep->map_handle = nvbo->bo.addr_space_offset;
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
return 0;
@ -111,19 +128,11 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
struct nouveau_channel *chan = NULL;
uint32_t flags = 0;
int ret = 0;
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
flags |= TTM_PL_FLAG_VRAM;
if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
flags |= TTM_PL_FLAG_TT;
if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
@ -135,10 +144,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
return PTR_ERR(chan);
}
ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
req->info.tile_mode, req->info.tile_flags, false,
(req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
&nvbo);
ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
if (chan)
nouveau_channel_put(&chan);
if (ret)
@ -161,7 +169,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
{
struct nouveau_bo *nvbo = gem->driver_private;
struct ttm_buffer_object *bo = &nvbo->bo;
uint32_t domains = valid_domains &
uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
uint32_t pref_flags = 0, valid_flags = 0;
@ -592,7 +600,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (push[i].bo_index >= req->nr_buffers) {
NV_ERROR(dev, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out;
goto out_prevalid;
}
bo[push[i].bo_index].read_domains |= (1 << 31);
@ -604,7 +612,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (ret) {
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "validate: %d\n", ret);
goto out;
goto out_prevalid;
}
/* Apply any relocations that are required */
@ -697,6 +705,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
out:
validate_fini(&op, fence);
nouveau_fence_unref(&fence);
out_prevalid:
kfree(bo);
kfree(push);


@ -152,7 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
nouveau_bo_unpin(dev_priv->vga_ram);
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
ttm_bo_device_release(&dev_priv->ttm.bdev);
@ -393,11 +392,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
int ret, dma_bits;
if (dev_priv->card_type >= NV_50 &&
pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
dma_bits = 40;
else
dma_bits = 32;
dma_bits = 32;
if (dev_priv->card_type >= NV_50) {
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
dma_bits = 40;
} else
if (drm_pci_device_is_pcie(dev) &&
dev_priv->chipset != 0x40 &&
dev_priv->chipset != 0x45) {
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
dma_bits = 39;
}
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
@ -419,14 +424,32 @@ nouveau_mem_vram_init(struct drm_device *dev)
}
/* reserve space at end of VRAM for PRAMIN */
if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
else
if (dev_priv->card_type >= NV_40)
dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
else
dev_priv->ramin_rsvd_vram = (512 * 1024);
if (dev_priv->card_type >= NV_50) {
dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
} else
if (dev_priv->card_type >= NV_40) {
u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
u32 rsvd;
/* estimate grctx size, the magics come from nv40_grctx.c */
if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
else rsvd = 0x4a40 * vs;
rsvd += 16 * 1024;
rsvd *= dev_priv->engine.fifo.channels;
/* pciegart table */
if (drm_pci_device_is_pcie(dev))
rsvd += 512 * 1024;
/* object storage */
rsvd += 512 * 1024;
dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
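/* worked example (hypothetical figures): an NV43 (not nv44-class, so the
 * 0x4a40 estimate applies) with vs == 1 and 32 fifo channels on PCIe
 * reserves (0x4a40 + 16KiB) * 32 + 512KiB + 512KiB = 2181120 bytes,
 * rounded up to roughly 2.1MiB of PRAMIN
 */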
} else {
dev_priv->ramin_rsvd_vram = 512 * 1024;
}
ret = dev_priv->engine.vram.init(dev);
if (ret)
@ -455,13 +478,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
0, 0, true, true, &dev_priv->vga_ram);
if (ret == 0)
ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
if (ret) {
NV_WARN(dev, "failed to reserve VGA memory\n");
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
if (dev_priv->card_type < NV_50) {
ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
0, 0, &dev_priv->vga_ram);
if (ret == 0)
ret = nouveau_bo_pin(dev_priv->vga_ram,
TTM_PL_FLAG_VRAM);
if (ret) {
NV_WARN(dev, "failed to reserve VGA memory\n");
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
}
}
dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
@ -480,7 +507,7 @@ nouveau_mem_gart_init(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_NONE;
#if !defined(__powerpc__) && !defined(__ia64__)
if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
ret = nouveau_mem_init_agp(dev);
if (ret)
NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@ -666,13 +693,14 @@ nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_mm *mm;
u32 b_size;
u64 size, block, rsvd;
int ret;
p_size = (p_size << PAGE_SHIFT) >> 12;
b_size = dev_priv->vram_rblock_size >> 12;
rsvd = (256 * 1024); /* vga memory */
size = (p_size << PAGE_SHIFT) - rsvd;
block = dev_priv->vram_rblock_size;
ret = nouveau_mm_init(&mm, 0, p_size, b_size);
ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
if (ret)
return ret;
@ -700,9 +728,15 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
struct nouveau_mem *node = mem->mm_node;
struct drm_device *dev = dev_priv->dev;
vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
if (node->tmp_vma.node) {
nouveau_vm_unmap(&node->tmp_vma);
nouveau_vm_put(&node->tmp_vma);
}
vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
static int
@ -715,7 +749,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_vram *node;
struct nouveau_mem *node;
u32 size_nc = 0;
int ret;
@ -724,7 +758,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
(nvbo->tile_flags >> 8) & 0xff, &node);
(nvbo->tile_flags >> 8) & 0x3ff, &node);
if (ret) {
mem->mm_node = NULL;
return (ret == -ENOSPC) ? 0 : ret;
@ -771,3 +805,84 @@ const struct ttm_mem_type_manager_func nouveau_vram_manager = {
nouveau_vram_manager_del,
nouveau_vram_manager_debug
};
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
return 0;
}
static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
return 0;
}
static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_mem *node = mem->mm_node;
if (node->tmp_vma.node) {
nouveau_vm_unmap(&node->tmp_vma);
nouveau_vm_put(&node->tmp_vma);
}
mem->mm_node = NULL;
}
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_vma *vma = &nvbo->vma;
struct nouveau_vm *vm = vma->vm;
struct nouveau_mem *node;
int ret;
if (unlikely((mem->num_pages << PAGE_SHIFT) >=
dev_priv->gart_info.aper_size))
return -ENOMEM;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
/* This node must be for evicting large-paged VRAM
* to system memory. Due to an nv50 limitation of
* not being able to mix large/small pages within
* the same PDE, we need to create a temporary
* small-paged VMA for the eviction.
*/
if (vma->node->type != vm->spg_shift) {
ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
vm->spg_shift, NV_MEM_ACCESS_RW,
&node->tmp_vma);
if (ret) {
kfree(node);
return ret;
}
}
node->page_shift = nvbo->vma.node->type;
mem->mm_node = node;
mem->start = 0;
return 0;
}
void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
nouveau_gart_manager_init,
nouveau_gart_manager_fini,
nouveau_gart_manager_new,
nouveau_gart_manager_del,
nouveau_gart_manager_debug
};


@ -53,13 +53,13 @@ void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
int nv50_vram_init(struct drm_device *);
int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
u32 memtype, struct nouveau_vram **);
void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
u32 memtype, struct nouveau_mem **);
void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
int nvc0_vram_init(struct drm_device *);
int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_vram **);
u32 memtype, struct nouveau_mem **);
bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
#endif


@ -39,12 +39,11 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
int ret;
if (nouveau_vram_notify)
flags = TTM_PL_FLAG_VRAM;
flags = NOUVEAU_GEM_DOMAIN_VRAM;
else
flags = TTM_PL_FLAG_TT;
flags = NOUVEAU_GEM_DOMAIN_GART;
ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
0, 0x0000, false, true, &ntfy);
ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
if (ret)
return ret;
@ -100,6 +99,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
uint32_t offset;
@ -114,11 +114,16 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
return -ENOMEM;
}
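/* pre-NV50: the ctxdma targets the notifier buffer's physical placement
 * (VRAM or GART); NV50+ goes through the channel's VM, so the buffer's
 * virtual offset is used instead
 */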
if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
target = NV_MEM_TARGET_VRAM;
else
target = NV_MEM_TARGET_GART;
offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
if (dev_priv->card_type < NV_50) {
if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
target = NV_MEM_TARGET_VRAM;
else
target = NV_MEM_TARGET_GART;
offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
} else {
target = NV_MEM_TARGET_VM;
offset = chan->notifier_bo->vma.offset;
}
offset += mem->start;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,


@ -36,6 +36,7 @@
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
#include "nv50_display.h"
struct nouveau_gpuobj_method {
struct list_head head;
@ -490,16 +491,22 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
}
if (target == NV_MEM_TARGET_GART) {
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
target = NV_MEM_TARGET_PCI_NOSNOOP;
base += dev_priv->gart_info.aper_base;
} else
if (base != 0) {
base = nouveau_sgdma_get_physical(dev, base);
struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
if (base == 0) {
nouveau_gpuobj_ref(gart, pobj);
return 0;
}
base = nouveau_sgdma_get_physical(dev, base);
target = NV_MEM_TARGET_PCI;
} else {
nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
return 0;
base += dev_priv->gart_info.aper_base;
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
target = NV_MEM_TARGET_PCI_NOSNOOP;
else
target = NV_MEM_TARGET_PCI;
}
}
@ -776,7 +783,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
int ret;
int ret, i;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
@ -841,6 +848,25 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
nouveau_gpuobj_ref(NULL, &ramht);
if (ret)
return ret;
/* dma objects for display sync channel semaphore blocks */
for (i = 0; i < 2; i++) {
struct nouveau_gpuobj *sem = NULL;
struct nv50_display_crtc *dispc =
&nv50_display(dev)->crtc[i];
u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
NV_MEM_ACCESS_RW,
NV_MEM_TARGET_VRAM, &sem);
if (ret)
return ret;
ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
nouveau_gpuobj_ref(NULL, &sem);
if (ret)
return ret;
}
}
/* VRAM ctxdma */
@ -909,7 +935,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
if (chan->ramin_heap.free_stack.next)
if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
}


@ -114,7 +114,9 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
ctx = (gpuobj->cinst << 10) | chan->id;
ctx = (gpuobj->cinst << 10) |
(chan->id << 28) |
chan->id; /* HASH_TAG */
} else {
ctx = (gpuobj->cinst >> 4) |
((gpuobj->engine <<


@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
struct drm_device *dev;
dma_addr_t *pages;
bool *ttm_alloced;
unsigned nr_pages;
u64 offset;
@ -20,7 +21,8 @@ struct nouveau_sgdma_be {
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
struct page **pages, struct page *dummy_read_page)
struct page **pages, struct page *dummy_read_page,
dma_addr_t *dma_addrs)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@ -34,15 +36,25 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
if (!nvbe->pages)
return -ENOMEM;
nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
if (!nvbe->ttm_alloced)
return -ENOMEM;
nvbe->nr_pages = 0;
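/* pages for which TTM already supplied a DMA address are reused as-is and
 * flagged in ttm_alloced[], so clear() knows not to pci_unmap them; the
 * rest are mapped here with pci_map_page()
 */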
while (num_pages--) {
nvbe->pages[nvbe->nr_pages] =
pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
nvbe->pages[nvbe->nr_pages] =
dma_addrs[nvbe->nr_pages];
nvbe->ttm_alloced[nvbe->nr_pages] = true;
} else {
nvbe->pages[nvbe->nr_pages] =
pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev,
nvbe->pages[nvbe->nr_pages])) {
be->func->clear(be);
return -EFAULT;
if (pci_dma_mapping_error(dev->pdev,
nvbe->pages[nvbe->nr_pages])) {
be->func->clear(be);
return -EFAULT;
}
}
nvbe->nr_pages++;
@ -65,17 +77,36 @@ nouveau_sgdma_clear(struct ttm_backend *be)
be->func->unbind(be);
while (nvbe->nr_pages--) {
pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
if (!nvbe->ttm_alloced[nvbe->nr_pages])
pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
kfree(nvbe->pages);
kfree(nvbe->ttm_alloced);
nvbe->pages = NULL;
nvbe->ttm_alloced = NULL;
nvbe->nr_pages = 0;
}
}
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
if (be) {
NV_DEBUG(nvbe->dev, "\n");
if (nvbe) {
if (nvbe->pages)
be->func->clear(be);
kfree(nvbe);
}
}
}
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@ -102,7 +133,7 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
nv04_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@ -125,32 +156,229 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
return 0;
}
static struct ttm_backend_func nv04_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
.bind = nv04_sgdma_bind,
.unbind = nv04_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
struct drm_device *dev = nvbe->dev;
nv_wr32(dev, 0x100810, 0x00000022);
if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
nv_rd32(dev, 0x100810));
nv_wr32(dev, 0x100810, 0x00000000);
}
static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
dma_addr_t *list = nvbe->pages;
u32 pte = mem->start << 2;
u32 cnt = nvbe->nr_pages;
if (be) {
NV_DEBUG(nvbe->dev, "\n");
nvbe->offset = mem->start << PAGE_SHIFT;
if (nvbe) {
if (nvbe->pages)
be->func->clear(be);
kfree(nvbe);
while (cnt--) {
nv_wo32(pgt, pte, (*list++ >> 7) | 1);
pte += 4;
}
nv41_sgdma_flush(nvbe);
nvbe->bound = true;
return 0;
}
static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
u32 cnt = nvbe->nr_pages;
while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
pte += 4;
}
nv41_sgdma_flush(nvbe);
nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv41_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
.bind = nv41_sgdma_bind,
.unbind = nv41_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
struct drm_device *dev = nvbe->dev;
nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
nv_rd32(dev, 0x100808));
nv_wr32(dev, 0x100808, 0x00000000);
}
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
u32 pte, tmp[4];
pte = base >> 2;
base &= ~0x0000000f;
tmp[0] = nv_ro32(pgt, base + 0x0);
tmp[1] = nv_ro32(pgt, base + 0x4);
tmp[2] = nv_ro32(pgt, base + 0x8);
tmp[3] = nv_ro32(pgt, base + 0xc);
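/* PTEs here are 27-bit page frame numbers packed back-to-back, four to a
 * 16-byte group, so a partial update must read-modify-write the whole group
 */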
while (cnt--) {
u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
switch (pte++ & 0x3) {
case 0:
tmp[0] &= ~0x07ffffff;
tmp[0] |= addr;
break;
case 1:
tmp[0] &= ~0xf8000000;
tmp[0] |= addr << 27;
tmp[1] &= ~0x003fffff;
tmp[1] |= addr >> 5;
break;
case 2:
tmp[1] &= ~0xffc00000;
tmp[1] |= addr << 22;
tmp[2] &= ~0x0001ffff;
tmp[2] |= addr >> 10;
break;
case 3:
tmp[2] &= ~0xfffe0000;
tmp[2] |= addr << 17;
tmp[3] &= ~0x00000fff;
tmp[3] |= addr >> 15;
break;
}
}
tmp[3] |= 0x40000000;
nv_wo32(pgt, base + 0x0, tmp[0]);
nv_wo32(pgt, base + 0x4, tmp[1]);
nv_wo32(pgt, base + 0x8, tmp[2]);
nv_wo32(pgt, base + 0xc, tmp[3]);
}
static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
dma_addr_t *list = nvbe->pages;
u32 pte = mem->start << 2, tmp[4];
u32 cnt = nvbe->nr_pages;
int i;
nvbe->offset = mem->start << PAGE_SHIFT;
if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
u32 part = (cnt > max) ? max : cnt;
nv44_sgdma_fill(pgt, list, pte, part);
pte += (part << 2);
list += part;
cnt -= part;
}
while (cnt >= 4) {
for (i = 0; i < 4; i++)
tmp[i] = *list++ >> 12;
nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
pte += 0x10;
cnt -= 4;
}
if (cnt)
nv44_sgdma_fill(pgt, list, pte, cnt);
nv44_sgdma_flush(nvbe);
nvbe->bound = true;
return 0;
}
static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
u32 cnt = nvbe->nr_pages;
if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
u32 part = (cnt > max) ? max : cnt;
nv44_sgdma_fill(pgt, NULL, pte, part);
pte += (part << 2);
cnt -= part;
}
while (cnt >= 4) {
nv_wo32(pgt, pte + 0x0, 0x00000000);
nv_wo32(pgt, pte + 0x4, 0x00000000);
nv_wo32(pgt, pte + 0x8, 0x00000000);
nv_wo32(pgt, pte + 0xc, 0x00000000);
pte += 0x10;
cnt -= 4;
}
if (cnt)
nv44_sgdma_fill(pgt, NULL, pte, cnt);
nv44_sgdma_flush(nvbe);
nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv44_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
.bind = nv44_sgdma_bind,
.unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
nvbe->offset = mem->start << PAGE_SHIFT;
nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
struct nouveau_mem *node = mem->mm_node;
/* noop: bound in move_notify() */
node->pages = nvbe->pages;
nvbe->pages = (dma_addr_t *)node;
nvbe->bound = true;
return 0;
}
@ -159,25 +387,14 @@ static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
if (!nvbe->bound)
return 0;
nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
nvbe->nr_pages << PAGE_SHIFT);
struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
/* noop: unbound in move_notify() */
nvbe->pages = node->pages;
node->pages = NULL;
nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nouveau_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
.bind = nouveau_sgdma_bind,
.unbind = nouveau_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static struct ttm_backend_func nv50_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
@ -198,10 +415,7 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
nvbe->dev = dev;
if (dev_priv->card_type < NV_50)
nvbe->backend.func = &nouveau_sgdma_backend;
else
nvbe->backend.func = &nv50_sgdma_backend;
nvbe->backend.func = dev_priv->gart_info.func;
return &nvbe->backend;
}
@ -210,21 +424,64 @@ nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
uint32_t aper_size, obj_size;
int i, ret;
u32 aper_size, align;
int ret;
if (dev_priv->card_type < NV_50) {
if (dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
aper_size = 64 * 1024 * 1024;
else
aper_size = 512 * 1024 * 1024;
if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
aper_size = 512 * 1024 * 1024;
else
aper_size = 64 * 1024 * 1024;
obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
obj_size += 8; /* ctxdma header */
/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
* christmas. The cards before it have them, the cards after
* it have them, why is NV44 so unloved?
*/
dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
if (!dev_priv->gart_info.dummy.page)
return -ENOMEM;
ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &gpuobj);
dev_priv->gart_info.dummy.addr =
pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
NV_ERROR(dev, "error mapping dummy page\n");
__free_page(dev_priv->gart_info.dummy.page);
dev_priv->gart_info.dummy.page = NULL;
return -ENOMEM;
}
if (dev_priv->card_type >= NV_50) {
dev_priv->gart_info.aper_base = 0;
dev_priv->gart_info.aper_size = aper_size;
dev_priv->gart_info.type = NOUVEAU_GART_HW;
dev_priv->gart_info.func = &nv50_sgdma_backend;
} else
if (drm_pci_device_is_pcie(dev) &&
dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
if (nv44_graph_class(dev)) {
dev_priv->gart_info.func = &nv44_sgdma_backend;
align = 512 * 1024;
} else {
dev_priv->gart_info.func = &nv41_sgdma_backend;
align = 16;
}
ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
return ret;
}
dev_priv->gart_info.sg_ctxdma = gpuobj;
dev_priv->gart_info.aper_base = 0;
dev_priv->gart_info.aper_size = aper_size;
dev_priv->gart_info.type = NOUVEAU_GART_HW;
} else {
ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
return ret;
@ -236,25 +493,14 @@ nouveau_sgdma_init(struct drm_device *dev)
(0 << 14) /* RW */ |
(2 << 16) /* PCI */);
nv_wo32(gpuobj, 4, aper_size - 1);
for (i = 2; i < 2 + (aper_size >> 12); i++)
nv_wo32(gpuobj, i * 4, 0x00000000);
dev_priv->gart_info.sg_ctxdma = gpuobj;
dev_priv->gart_info.aper_base = 0;
dev_priv->gart_info.aper_size = aper_size;
} else
if (dev_priv->chan_vm) {
ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
12, NV_MEM_ACCESS_RW,
&dev_priv->gart_info.vma);
if (ret)
return ret;
dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
dev_priv->gart_info.func = &nv04_sgdma_backend;
}
dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
return 0;
}
@ -264,7 +510,13 @@ nouveau_sgdma_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
nouveau_vm_put(&dev_priv->gart_info.vma);
if (dev_priv->gart_info.dummy.page) {
pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(dev_priv->gart_info.dummy.page);
dev_priv->gart_info.dummy.page = NULL;
}
}
uint32_t


@ -544,7 +544,6 @@ static int
nouveau_card_init_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
int ret;
ret = nouveau_channel_alloc(dev, &dev_priv->channel,
@ -552,41 +551,8 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
return ret;
/* no dma objects on fermi... */
if (dev_priv->card_type >= NV_C0)
goto out_done;
ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->vram_size,
NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
&gpuobj);
if (ret)
goto out_err;
ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
nouveau_gpuobj_ref(NULL, &gpuobj);
if (ret)
goto out_err;
ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->gart_info.aper_size,
NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
&gpuobj);
if (ret)
goto out_err;
ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
nouveau_gpuobj_ref(NULL, &gpuobj);
if (ret)
goto out_err;
out_done:
mutex_unlock(&dev_priv->channel->mutex);
return 0;
out_err:
nouveau_channel_put(&dev_priv->channel);
return ret;
}
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
@ -929,12 +895,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
dev_priv->wq = create_workqueue("nouveau");
if (!dev_priv->wq) {
ret = -EINVAL;
goto err_priv;
}
/* resource 0 is mmio regs */
/* resource 1 is linear FB */
/* resource 2 is RAMIN (mmio regs + 0x1000000) */
@ -947,7 +907,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_ERROR(dev, "Unable to initialize the mmio mapping. "
"Please report your setup to " DRIVER_EMAIL "\n");
ret = -EINVAL;
goto err_wq;
goto err_priv;
}
NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
(unsigned long long)mmio_start_offs);
@ -1054,8 +1014,6 @@ err_ramin:
iounmap(dev_priv->ramin);
err_mmio:
iounmap(dev_priv->mmio);
err_wq:
destroy_workqueue(dev_priv->wq);
err_priv:
kfree(dev_priv);
dev->dev_private = NULL;
@ -1103,9 +1061,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = dev->pci_device;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
if (drm_device_is_agp(dev))
if (drm_pci_device_is_agp(dev))
getparam->value = NV_AGP;
else if (drm_device_is_pcie(dev))
else if (drm_pci_device_is_pcie(dev))
getparam->value = NV_PCIE;
else
getparam->value = NV_PCI;
@ -1126,7 +1084,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
getparam->value = (dev_priv->card_type < NV_50);
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register


@ -239,11 +239,9 @@ static bool
probe_monitoring_device(struct nouveau_i2c_chan *i2c,
struct i2c_board_info *info)
{
char modalias[16] = "i2c:";
struct i2c_client *client;
strlcat(modalias, info->type, sizeof(modalias));
request_module(modalias);
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
client = i2c_new_device(&i2c->adapter, info);
if (!client)


@ -47,16 +47,25 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
printk(" (unknown bits 0x%08x)", value);
}
const struct nouveau_enum *
nouveau_enum_find(const struct nouveau_enum *en, u32 value)
{
while (en->name) {
if (en->value == value)
return en;
en++;
}
return NULL;
}
void
nouveau_enum_print(const struct nouveau_enum *en, u32 value)
{
while (en->name) {
if (value == en->value) {
printk("%s", en->name);
return;
}
en++;
en = nouveau_enum_find(en, value);
if (en) {
printk("%s", en->name);
return;
}
printk("(unknown enum 0x%08x)", value);


@ -36,10 +36,14 @@ struct nouveau_bitfield {
struct nouveau_enum {
u32 value;
const char *name;
void *data;
};
void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
void nouveau_enum_print(const struct nouveau_enum *, u32 value);
const struct nouveau_enum *
nouveau_enum_find(const struct nouveau_enum *, u32 value);
int nouveau_ratelimit(void);
#endif
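A minimal usage sketch (hypothetical table, values and wrapper function; assumes
nouveau_util.h is included, and follows the convention visible above that tables
are terminated by an entry with a NULL name):

static const struct nouveau_enum hypothetical_status[] = {
	{ 0x00000001, "BUSY", NULL },
	{ 0x00000002, "FAULT", NULL },
	{}
};

static void
hypothetical_dump_status(u32 value)
{
	/* prints the matching name, e.g. "FAULT" for 0x00000002, or
	 * "(unknown enum 0x........)" when nouveau_enum_find() has no match */
	nouveau_enum_print(hypothetical_status, value);
	printk("\n");
}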


@ -28,7 +28,7 @@
#include "nouveau_vm.h"
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
struct nouveau_vm *vm = vma->vm;
struct nouveau_mm_node *r;
@ -40,7 +40,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
u32 max = 1 << (vm->pgt_bits - bits);
u32 end, len;
list_for_each_entry(r, &vram->regions, rl_entry) {
delta = 0;
list_for_each_entry(r, &node->regions, rl_entry) {
u64 phys = (u64)r->offset << 12;
u32 num = r->length >> bits;
@ -52,7 +53,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
end = max;
len = end - pte;
vm->map(vma, pgt, vram, pte, len, phys);
vm->map(vma, pgt, node, pte, len, phys, delta);
num -= len;
pte += len;
@ -60,6 +61,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
pde++;
pte = 0;
}
delta += (u64)len << vma->node->type;
}
}
@ -67,14 +70,14 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
}
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
nouveau_vm_map_at(vma, 0, vram);
nouveau_vm_map_at(vma, 0, node);
}
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
dma_addr_t *list)
struct nouveau_mem *mem, dma_addr_t *list)
{
struct nouveau_vm *vm = vma->vm;
int big = vma->node->type != vm->spg_shift;
@ -94,7 +97,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
end = max;
len = end - pte;
vm->map_sg(vma, pgt, pte, list, len);
vm->map_sg(vma, pgt, mem, pte, len, list);
num -= len;
pte += len;
@ -311,18 +314,7 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
vm->spg_shift = 12;
vm->lpg_shift = 17;
pgt_bits = 27;
/* Should be 4096 everywhere, this is a hack that's
* currently necessary to avoid an elusive bug that
* causes corruption when mixing small/large pages
*/
if (length < (1ULL << 40))
block = 4096;
else {
block = (1 << pgt_bits);
if (length < block)
block = length;
}
block = 4096;
} else {
kfree(vm);
return -ENOSYS;


@ -67,9 +67,10 @@ struct nouveau_vm {
void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
struct nouveau_mem *, u32 pte, u32 cnt,
u64 phys, u64 delta);
void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
u32 pte, dma_addr_t *, u32 cnt);
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
void (*flush)(struct nouveau_vm *);
};
@ -82,20 +83,20 @@ int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
u32 access, struct nouveau_vma *);
void nouveau_vm_put(struct nouveau_vma *);
void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
dma_addr_t *);
struct nouveau_mem *, dma_addr_t *);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
u32 pte, dma_addr_t *, u32 cnt);
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nv50_vm_flush(struct nouveau_vm *);
void nv50_vm_flush_engine(struct drm_device *, int engine);
@ -104,9 +105,9 @@ void nv50_vm_flush_engine(struct drm_device *, int engine);
void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
u32 pte, dma_addr_t *, u32 cnt);
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nvc0_vm_flush(struct nouveau_vm *);


@ -1031,7 +1031,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)


@ -379,6 +379,15 @@ out:
return handled;
}
static const char *nv_dma_state_err(u32 state)
{
static const char * const desc[] = {
"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
};
return desc[(state >> 29) & 0x7];
}
void
nv04_fifo_isr(struct drm_device *dev)
{
@ -460,9 +469,10 @@ nv04_fifo_isr(struct drm_device *dev)
if (nouveau_ratelimit())
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
"State 0x%08x Push 0x%08x\n",
"State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
dma_put, ib_get, ib_put, state,
nv_dma_state_err(state),
push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
@ -476,8 +486,9 @@ nv04_fifo_isr(struct drm_device *dev)
}
} else {
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
"Put 0x%08x State 0x%08x Push 0x%08x\n",
chid, dma_get, dma_put, state, push);
"Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
chid, dma_get, dma_put, state,
nv_dma_state_err(state), push);
if (dma_get != dma_put)
nv_wr32(dev, 0x003244, dma_put);
@ -505,7 +516,7 @@ nv04_fifo_isr(struct drm_device *dev)
if (dev_priv->card_type == NV_50) {
if (status & 0x00000010) {
nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
nv50_fb_vm_trap(dev, nouveau_ratelimit());
status &= ~0x00000010;
nv_wr32(dev, 0x002100, 0x00000010);
}


@ -197,10 +197,12 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
struct drm_display_mode *mode, *tv_mode;
const struct drm_display_mode *tv_mode;
int n = 0;
for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(encoder->dev, tv_mode);
mode->clock = tv_norm->tv_enc_mode.vrefresh *


@ -112,7 +112,7 @@ extern struct nv17_tv_norm_params {
} nv17_tv_norms[NUM_TV_NORMS];
#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
extern struct drm_display_mode nv17_tv_modes[];
extern const struct drm_display_mode nv17_tv_modes[];
static inline int interpolate(int y0, int y1, int y2, int x)
{


@ -438,7 +438,7 @@ void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
/* Timings similar to the ones the blob sets */
struct drm_display_mode nv17_tv_modes[] = {
const struct drm_display_mode nv17_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC


@ -24,6 +24,53 @@ nv40_fb_set_tile_region(struct drm_device *dev, int i)
}
}
static void
nv40_fb_init_gart(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
nv_wr32(dev, 0x100800, 0x00000001);
return;
}
nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
nv_wr32(dev, 0x100820, 0x00000000);
}
static void
nv44_fb_init_gart(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
u32 vinst;
if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
nv_wr32(dev, 0x100850, 0x80000000);
nv_wr32(dev, 0x100800, 0x00000001);
return;
}
/* calculate the vram address of this PRAMIN block; the object
 * must be allocated with 512KiB alignment and must not exceed
 * a total size of 512KiB for this to work correctly
*/
vinst = nv_rd32(dev, 0x10020c);
vinst -= ((gart->pinst >> 19) + 1) << 19;
nv_wr32(dev, 0x100850, 0x80000000);
nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
nv_wr32(dev, 0x100850, 0x00008000);
nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
nv_wr32(dev, 0x100820, 0x00000000);
nv_wr32(dev, 0x10082c, 0x00000001);
nv_wr32(dev, 0x100800, vinst | 0x00000010);
}
int
nv40_fb_init(struct drm_device *dev)
{
@ -32,12 +79,12 @@ nv40_fb_init(struct drm_device *dev)
uint32_t tmp;
int i;
/* This is strictly a NV4x register (don't know about NV5x). */
/* The blob sets these to all kinds of values, and they mess up our setup. */
/* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
/* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */
/* Any idea what this is? */
nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
if (nv44_graph_class(dev))
nv44_fb_init_gart(dev);
else
nv40_fb_init_gart(dev);
}
switch (dev_priv->chipset) {
case 0x40:


@ -65,7 +65,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
{
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_channel *evo = nv50_display(dev)->master;
int index = nv_crtc->index, ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@ -135,8 +135,7 @@ static int
nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
@ -186,8 +185,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
struct nouveau_connector *nv_connector =
nouveau_crtc_connector_get(nv_crtc);
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_channel *evo = nv50_display(dev)->master;
struct drm_display_mode *native_mode = NULL;
struct drm_display_mode *mode = &nv_crtc->base.mode;
uint32_t outX, outY, horiz, vert;
@ -445,6 +443,42 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
static int
nv50_crtc_wait_complete(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo = disp->master;
u64 start;
int ret;
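/* push an EVO update with notification armed, then busy-wait (for up to
 * two seconds) for the notifier word at offset 0 to be written back
 */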
ret = RING_SPACE(evo, 6);
if (ret)
return ret;
BEGIN_RING(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x80000000);
BEGIN_RING(evo, 0, 0x0080, 1);
OUT_RING (evo, 0);
BEGIN_RING(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000000);
nv_wo32(disp->ntfy, 0x000, 0x00000000);
FIRE_RING (evo);
start = ptimer->read(dev);
do {
nv_wr32(dev, 0x61002c, 0x370);
nv_wr32(dev, 0x000140, 1);
if (nv_ro32(disp->ntfy, 0x000))
return 0;
} while (ptimer->read(dev) - start < 2000000000ULL);
return -EBUSY;
}
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
@ -453,6 +487,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
nv50_display_flip_stop(crtc);
drm_vblank_pre_modeset(dev, nv_crtc->index);
nv50_crtc_blank(nv_crtc, true);
}
@ -461,24 +496,14 @@ static void
nv50_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
nv50_crtc_blank(nv_crtc, false);
drm_vblank_post_modeset(dev, nv_crtc->index);
ret = RING_SPACE(evo, 2);
if (ret) {
NV_ERROR(dev, "no space while committing crtc\n");
return;
}
BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
FIRE_RING (evo);
nv50_crtc_wait_complete(crtc);
nv50_display_flip_next(crtc, crtc->fb, NULL);
}
static bool
@ -491,15 +516,15 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
static int
nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *passed_fb,
int x, int y, bool update, bool atomic)
int x, int y, bool atomic)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_channel *evo = nv50_display(dev)->master;
struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
int ret, format;
int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@ -525,28 +550,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
}
switch (drm_fb->depth) {
case 8:
format = NV50_EVO_CRTC_FB_DEPTH_8;
break;
case 15:
format = NV50_EVO_CRTC_FB_DEPTH_15;
break;
case 16:
format = NV50_EVO_CRTC_FB_DEPTH_16;
break;
case 24:
case 32:
format = NV50_EVO_CRTC_FB_DEPTH_24;
break;
case 30:
format = NV50_EVO_CRTC_FB_DEPTH_30;
break;
default:
NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
return -EINVAL;
}
nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
@ -556,14 +559,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
if (nv_crtc->fb.tile_flags == 0x7a00 ||
nv_crtc->fb.tile_flags == 0xfe00)
OUT_RING(evo, NvEvoFB32);
else
if (nv_crtc->fb.tile_flags == 0x7000)
OUT_RING(evo, NvEvoFB16);
else
OUT_RING(evo, NvEvoVRAM_LP);
OUT_RING (evo, fb->r_dma);
}
ret = RING_SPACE(evo, 12);
@ -571,45 +567,26 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
OUT_RING(evo, nv_crtc->fb.offset >> 8);
OUT_RING(evo, 0);
OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
if (!nv_crtc->fb.tile_flags) {
OUT_RING(evo, drm_fb->pitch | (1 << 20));
} else {
u32 tile_mode = fb->nvbo->tile_mode;
if (dev_priv->card_type >= NV_C0)
tile_mode >>= 4;
OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
}
if (dev_priv->chipset == 0x50)
OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
else
OUT_RING(evo, format);
OUT_RING (evo, nv_crtc->fb.offset >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
OUT_RING (evo, fb->r_pitch);
OUT_RING (evo, fb->r_format);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
OUT_RING(evo, fb->base.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
OUT_RING (evo, fb->base.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
OUT_RING (evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
OUT_RING(evo, (y << 16) | x);
OUT_RING (evo, (y << 16) | x);
if (nv_crtc->lut.depth != fb->base.depth) {
nv_crtc->lut.depth = fb->base.depth;
nv50_crtc_lut_load(crtc);
}
if (update) {
ret = RING_SPACE(evo, 2);
if (ret)
return ret;
BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
FIRE_RING(evo);
}
return 0;
}
@ -619,8 +596,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_connector *nv_connector = NULL;
uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
@ -700,14 +676,25 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false, false);
return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
static int
nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, true, false);
int ret;
nv50_display_flip_stop(crtc);
ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
if (ret)
return ret;
ret = nv50_crtc_wait_complete(crtc);
if (ret)
return ret;
return nv50_display_flip_next(crtc, crtc->fb, NULL);
}
static int
@ -715,7 +702,14 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true);
int ret;
nv50_display_flip_stop(crtc);
ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
if (ret)
return ret;
return nv50_crtc_wait_complete(crtc);
}
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@ -758,7 +752,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
nv_crtc->lut.depth = 0;
ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, false, true, &nv_crtc->lut.nvbo);
0, 0x0000, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@ -784,7 +778,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)


@ -36,9 +36,9 @@
static void
nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
@ -71,9 +71,9 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
static void
nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");


@ -41,8 +41,7 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
if (!nv_encoder->crtc)
@ -216,8 +215,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
uint32_t mode_ctl = 0, mode_ctl2 = 0;
int ret;


@ -24,6 +24,7 @@
*
*/
#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
#include "nv50_display.h"
#include "nouveau_crtc.h"
#include "nouveau_encoder.h"
@ -34,6 +35,7 @@
#include "drm_crtc_helper.h"
static void nv50_display_isr(struct drm_device *);
static void nv50_display_bh(unsigned long);
static inline int
nv50_sor_nr(struct drm_device *dev)
@ -172,16 +174,16 @@ nv50_display_init(struct drm_device *dev)
ret = nv50_evo_init(dev);
if (ret)
return ret;
evo = dev_priv->evo;
evo = nv50_display(dev)->master;
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
ret = RING_SPACE(evo, 11);
ret = RING_SPACE(evo, 15);
if (ret)
return ret;
BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
OUT_RING(evo, NvEvoSync);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
@ -190,6 +192,11 @@ nv50_display_init(struct drm_device *dev)
OUT_RING(evo, 0);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
OUT_RING(evo, 0);
/* required to make display sync channels not hate life */
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
OUT_RING (evo, 0x00000311);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
OUT_RING (evo, 0x00000311);
FIRE_RING(evo);
if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
NV_ERROR(dev, "evo pushbuf stalled\n");
@ -201,6 +208,8 @@ nv50_display_init(struct drm_device *dev)
static int nv50_display_disable(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo = disp->master;
struct drm_crtc *drm_crtc;
int ret, i;
@ -212,12 +221,12 @@ static int nv50_display_disable(struct drm_device *dev)
nv50_crtc_blank(crtc, true);
}
ret = RING_SPACE(dev_priv->evo, 2);
ret = RING_SPACE(evo, 2);
if (ret == 0) {
BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(dev_priv->evo, 0);
BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
}
FIRE_RING(dev_priv->evo);
FIRE_RING(evo);
/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
* cleaning up?
@ -267,10 +276,16 @@ int nv50_display_create(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct drm_connector *connector, *ct;
struct nv50_display *priv;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev_priv->engine.display.priv = priv;
/* init basic kernel modesetting */
drm_mode_config_init(dev);
@ -330,7 +345,7 @@ int nv50_display_create(struct drm_device *dev)
}
}
INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nv50_display_isr);
ret = nv50_display_init(dev);
@ -345,12 +360,131 @@ int nv50_display_create(struct drm_device *dev)
void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_display *disp = nv50_display(dev);
NV_DEBUG_KMS(dev, "\n");
drm_mode_config_cleanup(dev);
nv50_display_disable(dev);
nouveau_irq_unregister(dev, 26);
kfree(disp);
}
void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
struct nv50_display *disp = nv50_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
struct nouveau_channel *evo = dispc->sync;
int ret;
ret = RING_SPACE(evo, 8);
if (ret) {
WARN_ON(1);
return;
}
BEGIN_RING(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x0094, 1);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x00c0, 1);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x0080, 1);
OUT_RING (evo, 0x00000000);
FIRE_RING (evo);
}
int
nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *chan)
{
struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nv50_display *disp = nv50_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
struct nouveau_channel *evo = dispc->sync;
int ret;
ret = RING_SPACE(evo, 24);
if (unlikely(ret))
return ret;
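/* Rough shape of the handshake queued below (an interpretation of the
 * magic values, not taken from documentation): the rendering channel posts
 * 0xf00d0000 | sem.value once the frame is ready and then blocks on a
 * 0x74b1e000 completion token at sem.offset ^ 0x10; the EVO sync channel
 * is handed the same pair of values, so it waits for the "ready" token
 * before latching the new framebuffer and signals completion afterwards.
 * sem.offset is toggled and sem.value bumped at the end so back-to-back
 * flips never race on a stale semaphore word.
 */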
/* synchronise with the rendering channel, if necessary */
if (likely(chan)) {
u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
ret = RING_SPACE(chan, 10);
if (ret) {
WIND_RING(evo);
return ret;
}
if (dev_priv->chipset < 0xc0) {
BEGIN_RING(chan, NvSubSw, 0x0060, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, dispc->sem.offset);
BEGIN_RING(chan, NvSubSw, 0x006c, 1);
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
BEGIN_RING(chan, NvSubSw, 0x0064, 2);
OUT_RING (chan, dispc->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
BEGIN_RING(chan, NvSubSw, 0x0060, 1);
if (dev_priv->chipset < 0x84)
OUT_RING (chan, NvSema);
else
OUT_RING (chan, chan->vram_handle);
} else {
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
OUT_RING (chan, 0x1002);
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
OUT_RING (chan, 0x1001);
}
FIRE_RING (chan);
} else {
nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
0xf00d0000 | dispc->sem.value);
}
/* queue the flip on the crtc's "display sync" channel */
BEGIN_RING(evo, 0, 0x0100, 1);
OUT_RING (evo, 0xfffe0000);
BEGIN_RING(evo, 0, 0x0084, 5);
OUT_RING (evo, chan ? 0x00000100 : 0x00000010);
OUT_RING (evo, dispc->sem.offset);
OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
OUT_RING (evo, 0x74b1e000);
OUT_RING (evo, NvEvoSync);
BEGIN_RING(evo, 0, 0x00a0, 2);
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x00c0, 1);
OUT_RING (evo, nv_fb->r_dma);
BEGIN_RING(evo, 0, 0x0110, 2);
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x0800, 5);
OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (fb->height << 16) | fb->width);
OUT_RING (evo, nv_fb->r_pitch);
OUT_RING (evo, nv_fb->r_format);
BEGIN_RING(evo, 0, 0x0080, 1);
OUT_RING (evo, 0x00000000);
FIRE_RING (evo);
dispc->sem.offset ^= 0x10;
dispc->sem.value++;
return 0;
}
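The two helpers above implement semaphore-synchronised page flipping: when a rendering channel is supplied it is programmed to release a semaphore value that the per-CRTC "display sync" EVO channel waits on before latching the new framebuffer, and when no channel is given the value is written straight into the semaphore buffer with nouveau_bo_wr32(). A minimal sketch of how a flip/modeset path might drive the two helpers follows; the caller name and the blanking flag are placeholders for illustration, not code from this series:

#include "drmP.h"
#include "nouveau_drv.h"
#include "nv50_display.h"

/* Sketch only: drive the flip helpers declared in nv50_display.h. */
static int example_set_scanout(struct drm_crtc *crtc,
			       struct drm_framebuffer *new_fb,
			       struct nouveau_channel *chan,
			       bool blanking)
{
	if (blanking) {
		/* CRTC is being turned off/reconfigured: cancel pending flips */
		nv50_display_flip_stop(crtc);
		return 0;
	}

	/* queue the flip; with a channel, the EVO sync channel waits on its
	 * semaphore release before scanning out new_fb */
	return nv50_display_flip_next(crtc, new_fb, chan);
}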
static u16
@ -466,11 +600,12 @@ static void
nv50_display_unk10_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dev_priv->evo_irq.dcb = NULL;
disp->irq.dcb = NULL;
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
@ -541,7 +676,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
if (dcb->type == type && (dcb->or & (1 << or))) {
nouveau_bios_run_display_table(dev, dcb, 0, -1);
dev_priv->evo_irq.dcb = dcb;
disp->irq.dcb = dcb;
goto ack;
}
}
@ -587,15 +722,16 @@ static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dcb = dev_priv->evo_irq.dcb;
dcb = disp->irq.dcb;
if (dcb) {
nouveau_bios_run_display_table(dev, dcb, 0, -2);
dev_priv->evo_irq.dcb = NULL;
disp->irq.dcb = NULL;
}
/* CRTC clock change requested? */
@ -692,9 +828,9 @@ nv50_display_unk20_handler(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
}
dev_priv->evo_irq.dcb = dcb;
dev_priv->evo_irq.pclk = pclk;
dev_priv->evo_irq.script = script;
disp->irq.dcb = dcb;
disp->irq.pclk = pclk;
disp->irq.script = script;
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
@ -735,13 +871,13 @@ nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
static void
nv50_display_unk40_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_entry *dcb = dev_priv->evo_irq.dcb;
u16 script = dev_priv->evo_irq.script;
u32 unk30 = nv_rd32(dev, 0x610030), pclk = dev_priv->evo_irq.pclk;
struct nv50_display *disp = nv50_display(dev);
struct dcb_entry *dcb = disp->irq.dcb;
u16 script = disp->irq.script;
u32 unk30 = nv_rd32(dev, 0x610030), pclk = disp->irq.pclk;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dev_priv->evo_irq.dcb = NULL;
disp->irq.dcb = NULL;
if (!dcb)
goto ack;
@ -754,12 +890,10 @@ ack:
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
}
void
nv50_display_irq_handler_bh(struct work_struct *work)
static void
nv50_display_bh(unsigned long data)
{
struct drm_nouveau_private *dev_priv =
container_of(work, struct drm_nouveau_private, irq_work);
struct drm_device *dev = dev_priv->dev;
struct drm_device *dev = (struct drm_device *)data;
for (;;) {
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
@ -807,7 +941,7 @@ nv50_display_error_handler(struct drm_device *dev)
static void
nv50_display_isr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
uint32_t delayed = 0;
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
@ -835,8 +969,7 @@ nv50_display_isr(struct drm_device *dev)
NV50_PDISPLAY_INTR_1_CLK_UNK40));
if (clock) {
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
if (!work_pending(&dev_priv->irq_work))
queue_work(dev_priv->wq, &dev_priv->irq_work);
tasklet_schedule(&disp->tasklet);
delayed |= clock;
intr1 &= ~clock;
}

View File

@ -35,7 +35,36 @@
#include "nouveau_crtc.h"
#include "nv50_evo.h"
void nv50_display_irq_handler_bh(struct work_struct *work);
struct nv50_display_crtc {
struct nouveau_channel *sync;
struct {
struct nouveau_bo *bo;
u32 offset;
u16 value;
} sem;
};
struct nv50_display {
struct nouveau_channel *master;
struct nouveau_gpuobj *ntfy;
struct nv50_display_crtc crtc[2];
struct tasklet_struct tasklet;
struct {
struct dcb_entry *dcb;
u16 script;
u32 pclk;
} irq;
};
static inline struct nv50_display *
nv50_display(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
return dev_priv->engine.display.priv;
}
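The new nv50_display structure gathers what previously lived in drm_nouveau_private (the master EVO channel, per-CRTC sync channels and semaphores, the tasklet and the old evo_irq state), and the inline accessor above fetches it from dev_priv->engine.display.priv. A minimal usage sketch, assuming this header is in scope and only a valid drm_device pointer is held:

/* Sketch only: how callers elsewhere in this series reach the display state. */
static void example_use_display(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_channel *evo = disp->master;	/* modesetting channel */
	struct nv50_display_crtc *dispc = &disp->crtc[0];	/* head 0 sync state */

	(void)evo;
	(void)dispc;
}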
int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
@ -44,4 +73,15 @@ void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
struct nouveau_channel *chan);
void nv50_display_flip_stop(struct drm_crtc *);
int nv50_evo_init(struct drm_device *dev);
void nv50_evo_fini(struct drm_device *dev);
void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
u64 size);
int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
u64 base, u64 size, struct nouveau_gpuobj **);
#endif /* __NV50_DISPLAY_H__ */

View File

@ -27,20 +27,17 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nv50_display.h"
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
struct drm_nouveau_private *dev_priv;
struct nouveau_channel *evo = *pevo;
if (!evo)
return;
*pevo = NULL;
dev_priv = evo->dev->dev_private;
dev_priv->evo_alloc &= ~(1 << evo->id);
nouveau_gpuobj_channel_takedown(evo);
nouveau_bo_unmap(evo->pushbuf_bo);
nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@ -51,42 +48,61 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
kfree(evo);
}
int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
u32 flags5)
void
nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
{
struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
struct drm_device *dev = evo->dev;
struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
u32 flags5;
if (dev_priv->chipset < 0xc0) {
/* not supported on 0x50, specified in format mthd */
if (dev_priv->chipset == 0x50)
memtype = 0;
flags5 = 0x00010000;
} else {
if (memtype & 0x80000000)
flags5 = 0x00000000; /* large pages */
else
flags5 = 0x00020000;
}
nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
nv_wo32(obj, 0x14, flags5);
dev_priv->engine.instmem.flush(obj->dev);
}
int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
u64 base, u64 size, struct nouveau_gpuobj **pobj)
{
struct nv50_display *disp = nv50_display(evo->dev);
struct nouveau_gpuobj *obj = NULL;
int ret;
ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
if (ret)
return ret;
obj->engine = NVOBJ_ENGINE_DISPLAY;
nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
nv_wo32(obj, 4, limit);
nv_wo32(obj, 8, offset);
nv_wo32(obj, 12, 0x00000000);
nv_wo32(obj, 16, 0x00000000);
nv_wo32(obj, 20, flags5);
dev_priv->engine.instmem.flush(dev);
nv50_evo_dmaobj_init(obj, memtype, base, size);
ret = nouveau_ramht_insert(evo, name, obj);
ret = nouveau_ramht_insert(evo, handle, obj);
if (ret)
goto out;
if (pobj)
nouveau_gpuobj_ref(obj, pobj);
out:
nouveau_gpuobj_ref(NULL, &obj);
if (ret) {
return ret;
}
return 0;
return ret;
}
static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
nv50_evo_channel_new(struct drm_device *dev, int chid,
struct nouveau_channel **pevo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo;
int ret;
@ -95,25 +111,13 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
return -ENOMEM;
*pevo = evo;
for (evo->id = 0; evo->id < 5; evo->id++) {
if (dev_priv->evo_alloc & (1 << evo->id))
continue;
dev_priv->evo_alloc |= (1 << evo->id);
break;
}
if (evo->id == 5) {
kfree(evo);
return -ENODEV;
}
evo->id = chid;
evo->dev = dev;
evo->user_get = 4;
evo->user_put = 0;
ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
false, true, &evo->pushbuf_bo);
&evo->pushbuf_bo);
if (ret == 0)
ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
if (ret) {
@ -138,8 +142,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
}
/* bind primary evo channel's ramht to the channel */
if (dev_priv->evo && evo != dev_priv->evo)
nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
if (disp->master && evo != disp->master)
nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL);
return 0;
}
@ -212,21 +216,39 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
}
}
static void
nv50_evo_destroy(struct drm_device *dev)
{
struct nv50_display *disp = nv50_display(dev);
int i;
for (i = 0; i < 2; i++) {
if (disp->crtc[i].sem.bo) {
nouveau_bo_unmap(disp->crtc[i].sem.bo);
nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
}
nv50_evo_channel_del(&disp->crtc[i].sync);
}
nouveau_gpuobj_ref(NULL, &disp->ntfy);
nv50_evo_channel_del(&disp->master);
}
static int
nv50_evo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
struct nouveau_gpuobj *ramht = NULL;
struct nouveau_channel *evo;
int ret;
int ret, i, j;
/* create primary evo channel, the one we use for modesetting
* purposes
*/
ret = nv50_evo_channel_new(dev, &dev_priv->evo);
ret = nv50_evo_channel_new(dev, 0, &disp->master);
if (ret)
return ret;
evo = dev_priv->evo;
evo = disp->master;
/* setup object management on it, any other evo channel will
* use this also as there's no per-channel support on the
@ -236,109 +258,167 @@ nv50_evo_create(struct drm_device *dev)
NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
nv50_evo_channel_del(&dev_priv->evo);
return ret;
goto err;
}
ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
if (ret) {
NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
nv50_evo_channel_del(&dev_priv->evo);
return ret;
goto err;
}
ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
if (ret) {
NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
nv50_evo_channel_del(&dev_priv->evo);
return ret;
goto err;
}
ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
nouveau_gpuobj_ref(NULL, &ramht);
if (ret) {
nv50_evo_channel_del(&dev_priv->evo);
return ret;
}
if (ret)
goto err;
/* not sure exactly what this is..
*
* the first dword of the structure is used by nvidia to wait on
* full completion of an EVO "update" command.
*
* method 0x8c on the master evo channel will fill a lot more of
* this structure with some undefined info
*/
ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0,
NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
if (ret)
goto err;
ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
disp->ntfy->vinst, disp->ntfy->size, NULL);
if (ret)
goto err;
/* create some default objects for the scanout memtypes we support */
if (dev_priv->card_type >= NV_C0) {
ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
0, 0xffffffff, 0x00000000);
if (ret) {
nv50_evo_channel_del(&dev_priv->evo);
return ret;
ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
0, dev_priv->vram_size, NULL);
if (ret)
goto err;
ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
0, dev_priv->vram_size, NULL);
if (ret)
goto err;
ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
(dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00),
0, dev_priv->vram_size, NULL);
if (ret)
goto err;
ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
(dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00),
0, dev_priv->vram_size, NULL);
if (ret)
goto err;
/* create "display sync" channels and other structures we need
* to implement page flipping
*/
for (i = 0; i < 2; i++) {
struct nv50_display_crtc *dispc = &disp->crtc[i];
u64 offset;
ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
if (ret)
goto err;
ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, &dispc->sem.bo);
if (!ret) {
offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
if (!ret)
ret = nouveau_bo_map(dispc->sem.bo);
if (ret)
nouveau_bo_ref(NULL, &dispc->sem.bo);
}
ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
0, dev_priv->vram_size, 0x00020000);
if (ret) {
nv50_evo_channel_del(&dev_priv->evo);
return ret;
}
if (ret)
goto err;
ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
0, dev_priv->vram_size, 0x00000000);
if (ret) {
nv50_evo_channel_del(&dev_priv->evo);
return ret;
}
} else {
ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
0, 0xffffffff, 0x00010000);
if (ret) {
nv50_evo_channel_del(&dev_priv->evo);
return ret;
}
ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
offset, 4096, NULL);
if (ret)
goto err;
ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
0, dev_priv->vram_size, NULL);
if (ret)
goto err;
ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
0, 0xffffffff, 0x00010000);
if (ret) {
nv50_evo_channel_del(&dev_priv->evo);
return ret;
}
ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
(dev_priv->chipset < 0xc0 ?
0x7a00 : 0xfe00),
0, dev_priv->vram_size, NULL);
if (ret)
goto err;
ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
0, dev_priv->vram_size, 0x00010000);
if (ret) {
nv50_evo_channel_del(&dev_priv->evo);
return ret;
}
ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
(dev_priv->chipset < 0xc0 ?
0x7000 : 0xfe00),
0, dev_priv->vram_size, NULL);
if (ret)
goto err;
ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
0, dev_priv->vram_size, 0x00010000);
if (ret) {
nv50_evo_channel_del(&dev_priv->evo);
return ret;
}
for (j = 0; j < 4096; j += 4)
nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
dispc->sem.offset = 0;
}
return 0;
err:
nv50_evo_destroy(dev);
return ret;
}
int
nv50_evo_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
struct nv50_display *disp = nv50_display(dev);
int ret, i;
if (!dev_priv->evo) {
if (!disp->master) {
ret = nv50_evo_create(dev);
if (ret)
return ret;
}
return nv50_evo_channel_init(dev_priv->evo);
ret = nv50_evo_channel_init(disp->master);
if (ret)
return ret;
for (i = 0; i < 2; i++) {
ret = nv50_evo_channel_init(disp->crtc[i].sync);
if (ret)
return ret;
}
return 0;
}
void
nv50_evo_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
int i;
if (dev_priv->evo) {
nv50_evo_channel_fini(dev_priv->evo);
nv50_evo_channel_del(&dev_priv->evo);
for (i = 0; i < 2; i++) {
if (disp->crtc[i].sync)
nv50_evo_channel_fini(disp->crtc[i].sync);
}
if (disp->master)
nv50_evo_channel_fini(disp->master);
nv50_evo_destroy(dev);
}

View File

@ -27,12 +27,6 @@
#ifndef __NV50_EVO_H__
#define __NV50_EVO_H__
int nv50_evo_init(struct drm_device *dev);
void nv50_evo_fini(struct drm_device *dev);
int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
u32 tile_flags, u32 magic_flags,
u32 offset, u32 limit);
#define NV50_EVO_UPDATE 0x00000080
#define NV50_EVO_UNK84 0x00000084
#define NV50_EVO_UNK84_NOTIFY 0x40000000
@ -119,5 +113,7 @@ int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
/* Both of these are needed, otherwise nothing happens. */
#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
#define NV50_EVO_CRTC_UNK900 0x00000900
#define NV50_EVO_CRTC_UNK904 0x00000904
#endif

View File

@ -8,31 +8,61 @@ struct nv50_fb_priv {
dma_addr_t r100c08;
};
static void
nv50_fb_destroy(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nv50_fb_priv *priv = pfb->priv;
if (drm_mm_initialized(&pfb->tag_heap))
drm_mm_takedown(&pfb->tag_heap);
if (priv->r100c08_page) {
pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
__free_page(priv->r100c08_page);
}
kfree(priv);
pfb->priv = NULL;
}
static int
nv50_fb_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nv50_fb_priv *priv;
u32 tagmem;
int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
pfb->priv = priv;
priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c08_page) {
kfree(priv);
nv50_fb_destroy(dev);
return -ENOMEM;
}
priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
__free_page(priv->r100c08_page);
kfree(priv);
nv50_fb_destroy(dev);
return -EFAULT;
}
dev_priv->engine.fb.priv = priv;
tagmem = nv_rd32(dev, 0x100320);
NV_DEBUG(dev, "%d tags available\n", tagmem);
ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
if (ret) {
nv50_fb_destroy(dev);
return ret;
}
return 0;
}
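nv50_fb_create() now stores pfb->priv before acquiring any resources, so every failure path can simply call nv50_fb_destroy(), which checks each resource (tag heap, r100c08 page and mapping) before releasing it. A generic sketch of that create/teardown pairing, with deliberately trivial placeholder resources rather than the driver's real ones:

#include <linux/slab.h>

/* Sketch: publish the private struct early, unwind through one destroy path. */
struct example_priv {
	void *res_a;
	void *res_b;
};

static void example_destroy(struct example_priv *priv)
{
	kfree(priv->res_b);		/* kfree(NULL) is a no-op */
	kfree(priv->res_a);
	kfree(priv);
}

static int example_create(struct example_priv **ppriv)
{
	struct example_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	*ppriv = priv;			/* published before any resource */

	priv->res_a = kzalloc(64, GFP_KERNEL);
	priv->res_b = kzalloc(64, GFP_KERNEL);
	if (!priv->res_a || !priv->res_b) {
		example_destroy(priv);	/* tolerates partial init */
		*ppriv = NULL;
		return -ENOMEM;
	}
	return 0;
}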
@ -81,26 +111,112 @@ nv50_fb_init(struct drm_device *dev)
void
nv50_fb_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_fb_priv *priv;
priv = dev_priv->engine.fb.priv;
if (!priv)
return;
dev_priv->engine.fb.priv = NULL;
pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
__free_page(priv->r100c08_page);
kfree(priv);
nv50_fb_destroy(dev);
}
static struct nouveau_enum vm_dispatch_subclients[] = {
{ 0x00000000, "GRCTX", NULL },
{ 0x00000001, "NOTIFY", NULL },
{ 0x00000002, "QUERY", NULL },
{ 0x00000003, "COND", NULL },
{ 0x00000004, "M2M_IN", NULL },
{ 0x00000005, "M2M_OUT", NULL },
{ 0x00000006, "M2M_NOTIFY", NULL },
{}
};
static struct nouveau_enum vm_ccache_subclients[] = {
{ 0x00000000, "CB", NULL },
{ 0x00000001, "TIC", NULL },
{ 0x00000002, "TSC", NULL },
{}
};
static struct nouveau_enum vm_prop_subclients[] = {
{ 0x00000000, "RT0", NULL },
{ 0x00000001, "RT1", NULL },
{ 0x00000002, "RT2", NULL },
{ 0x00000003, "RT3", NULL },
{ 0x00000004, "RT4", NULL },
{ 0x00000005, "RT5", NULL },
{ 0x00000006, "RT6", NULL },
{ 0x00000007, "RT7", NULL },
{ 0x00000008, "ZETA", NULL },
{ 0x00000009, "LOCAL", NULL },
{ 0x0000000a, "GLOBAL", NULL },
{ 0x0000000b, "STACK", NULL },
{ 0x0000000c, "DST2D", NULL },
{}
};
static struct nouveau_enum vm_pfifo_subclients[] = {
{ 0x00000000, "PUSHBUF", NULL },
{ 0x00000001, "SEMAPHORE", NULL },
{}
};
static struct nouveau_enum vm_bar_subclients[] = {
{ 0x00000000, "FB", NULL },
{ 0x00000001, "IN", NULL },
{}
};
static struct nouveau_enum vm_client[] = {
{ 0x00000000, "STRMOUT", NULL },
{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
{ 0x00000004, "PFIFO_WRITE", NULL },
{ 0x00000005, "CCACHE", vm_ccache_subclients },
{ 0x00000006, "PPPP", NULL },
{ 0x00000007, "CLIPID", NULL },
{ 0x00000008, "PFIFO_READ", NULL },
{ 0x00000009, "VFETCH", NULL },
{ 0x0000000a, "TEXTURE", NULL },
{ 0x0000000b, "PROP", vm_prop_subclients },
{ 0x0000000c, "PVP", NULL },
{ 0x0000000d, "PBSP", NULL },
{ 0x0000000e, "PCRYPT", NULL },
{ 0x0000000f, "PCOUNTER", NULL },
{ 0x00000011, "PDAEMON", NULL },
{}
};
static struct nouveau_enum vm_engine[] = {
{ 0x00000000, "PGRAPH", NULL },
{ 0x00000001, "PVP", NULL },
{ 0x00000004, "PEEPHOLE", NULL },
{ 0x00000005, "PFIFO", vm_pfifo_subclients },
{ 0x00000006, "BAR", vm_bar_subclients },
{ 0x00000008, "PPPP", NULL },
{ 0x00000009, "PBSP", NULL },
{ 0x0000000a, "PCRYPT", NULL },
{ 0x0000000b, "PCOUNTER", NULL },
{ 0x0000000c, "SEMAPHORE_BG", NULL },
{ 0x0000000d, "PCOPY", NULL },
{ 0x0000000e, "PDAEMON", NULL },
{}
};
static struct nouveau_enum vm_fault[] = {
{ 0x00000000, "PT_NOT_PRESENT", NULL },
{ 0x00000001, "PT_TOO_SHORT", NULL },
{ 0x00000002, "PAGE_NOT_PRESENT", NULL },
{ 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
{ 0x00000004, "PAGE_READ_ONLY", NULL },
{ 0x00000006, "NULL_DMAOBJ", NULL },
{ 0x00000007, "WRONG_MEMTYPE", NULL },
{ 0x0000000b, "VRAM_LIMIT", NULL },
{ 0x0000000f, "DMAOBJ_LIMIT", NULL },
{}
};
void
nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
nv50_fb_vm_trap(struct drm_device *dev, int display)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
const struct nouveau_enum *en, *cl;
unsigned long flags;
u32 trap[6], idx, chinst;
u8 st0, st1, st2, st3;
int i, ch;
idx = nv_rd32(dev, 0x100c90);
@ -117,8 +233,8 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
if (!display)
return;
/* lookup channel id */
chinst = (trap[2] << 16) | trap[1];
spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
@ -131,9 +247,48 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
"channel %d (0x%08x)\n",
name, (trap[5] & 0x100 ? "read" : "write"),
trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
trap[0], ch, chinst);
/* decode status bits into something more useful */
if (dev_priv->chipset < 0xa3 ||
dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
st0 = (trap[0] & 0x0000000f) >> 0;
st1 = (trap[0] & 0x000000f0) >> 4;
st2 = (trap[0] & 0x00000f00) >> 8;
st3 = (trap[0] & 0x0000f000) >> 12;
} else {
st0 = (trap[0] & 0x000000ff) >> 0;
st1 = (trap[0] & 0x0000ff00) >> 8;
st2 = (trap[0] & 0x00ff0000) >> 16;
st3 = (trap[0] & 0xff000000) >> 24;
}
NV_INFO(dev, "VM: trapped %s at 0x%02x%04x%04x on ch %d [0x%08x] ",
(trap[5] & 0x00000100) ? "read" : "write",
trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, ch, chinst);
en = nouveau_enum_find(vm_engine, st0);
if (en)
printk("%s/", en->name);
else
printk("%02x/", st0);
cl = nouveau_enum_find(vm_client, st2);
if (cl)
printk("%s/", cl->name);
else
printk("%02x/", st2);
if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
else cl = NULL;
if (cl)
printk("%s", cl->name);
else
printk("%02x", st3);
printk(" reason: ");
en = nouveau_enum_find(vm_fault, st1);
if (en)
printk("%s\n", en->name);
else
printk("0x%08x\n", st1);
}
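The decoder above walks the nouveau_enum tables (engine, client, sub-client and fault reason), each terminated by an empty entry, and falls back to printing the raw field when no name matches. nouveau_enum_find() itself is added elsewhere in this series, so the sketch below is an assumption inferred from the tables and their uses here, not the driver's actual definition:

#include <linux/stddef.h>
#include <linux/types.h>

/* Inferred layout: { value, name, optional sub-table }, {} terminator. */
struct example_enum {
	u32 value;
	const char *name;
	void *data;
};

static const struct example_enum *
example_enum_find(const struct example_enum *en, u32 value)
{
	while (en->name) {		/* terminator has a NULL name */
		if (en->value == value)
			return en;
		en++;
	}
	return NULL;
}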

View File

@ -149,6 +149,7 @@ nv50_fifo_init_regs(struct drm_device *dev)
nv_wr32(dev, 0x3204, 0);
nv_wr32(dev, 0x3210, 0);
nv_wr32(dev, 0x3270, 0);
nv_wr32(dev, 0x2044, 0x01003fff);
/* Enable dummy channels setup by nv50_instmem.c */
nv50_fifo_channel_enable(dev, 0);
@ -273,7 +274,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->cinst >> 4));
nv_wo32(ramfc, 0x44, 0x2101ffff);
nv_wo32(ramfc, 0x44, 0x01003fff);
nv_wo32(ramfc, 0x60, 0x7fffffff);
nv_wo32(ramfc, 0x40, 0x00000000);
nv_wo32(ramfc, 0x7c, 0x30000001);

View File

@ -137,6 +137,7 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
struct nv50_gpio_priv *priv = pgpio->priv;
struct nv50_gpio_handler *gpioh, *tmp;
struct dcb_gpio_entry *gpio;
LIST_HEAD(tofree);
unsigned long flags;
gpio = nouveau_bios_gpio_entry(dev, tag);
@ -149,10 +150,14 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
gpioh->handler != handler ||
gpioh->data != data)
continue;
list_del(&gpioh->head);
kfree(gpioh);
list_move(&gpioh->head, &tofree);
}
spin_unlock_irqrestore(&priv->lock, flags);
list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
flush_work_sync(&gpioh->work);
kfree(gpioh);
}
}
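The unregister path above unlinks matching handlers onto a local list while holding priv->lock, then drops the lock before calling flush_work_sync() and kfree(), so it can never wait on a work item that is itself spinning on the same lock. A generic sketch of that unlink-then-flush pattern; the types are placeholders, and it moves every entry for brevity where the real code moves only the entries matching the caller's tag, handler and data:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_handler {
	struct list_head head;
	struct work_struct work;
};

static void example_unregister_all(spinlock_t *lock, struct list_head *handlers)
{
	struct example_handler *h, *tmp;
	unsigned long flags;
	LIST_HEAD(tofree);

	spin_lock_irqsave(lock, flags);
	list_splice_init(handlers, &tofree);	/* unlink while locked */
	spin_unlock_irqrestore(lock, flags);

	/* lock dropped: safe to wait for running handlers, then free */
	list_for_each_entry_safe(h, tmp, &tofree, head) {
		flush_work_sync(&h->work);
		kfree(h);
	}
}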
bool
@ -205,7 +210,6 @@ nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nv50_gpio_priv *priv;
int ret;
if (!pgpio->priv) {
@ -213,7 +217,6 @@ nv50_gpio_init(struct drm_device *dev)
if (ret)
return ret;
}
priv = pgpio->priv;
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@ -293,7 +296,7 @@ nv50_gpio_isr(struct drm_device *dev)
continue;
gpioh->inhibit = true;
queue_work(dev_priv->wq, &gpioh->work);
schedule_work(&gpioh->work);
}
spin_unlock(&priv->lock);
}

Some files were not shown because too many files have changed in this diff.