be53bfdb80

Pull drm main changes from Dave Airlie:
 "This is the main drm pull request; I'm probably going to send two more
  smaller ones, will explain below.

  This contains a patch that is also in the fbdev tree, but it should be
  the same patch: it added an API for hot-unplugging framebuffer devices,
  and I need that API for a new driver. It also contains some changes to
  the i2c tree which Jean has acked, and one change to moorestown
  platform stuff in x86.

  Highlights:
  - new drivers: UDL driver for USB displaylink devices, kms only,
    should support correct hotplug operations.
  - core: i2c speedups + better hotplug support, EDID overriding via
    firmware interface - allows a user to load firmware for a broken
    monitor/kvm from userspace; it even has documentation for it.
  - exynos: new HDMI audio + hdmi 1.4 + virtual output driver
  - gma500: code cleanup
  - radeon: cleanups, CS optimisations, streamout support and pageflip
    fix
  - nouveau: NVD9 displayport support + more reclocking work
  - i915: re-enabling GMBUS, finish gpu patch (might help hibernation,
    who knows), missed irq fixes, stencil tiling fixes, interlaced
    support, aliased PPGTT support for SNB/IVB, swizzling for SNB/IVB,
    semaphore fixes

  As well as the usual bunch of cleanups and fixes all over the place.

  I've got two things I'd like to merge a bit later:

  a) AMD support for all their new radeonhd 7000 series GPUs and APUs.
     AMD dropped this a bit late due to insane internal review processes
     (please AMD, just follow Intel and let the open source guys ship
     stuff early); however, I don't want to penalise people who own this
     hardware (since it's been on sale for 3-4 months and GPU hw doesn't
     exactly have a lifetime in years) and consign them to using closed
     drivers for longer than necessary. The changes are well contained
     and just plug the new GPU functionality into the driver, so they
     should be fairly regression-proof. I just want to give them a bit
     of a run on the hw AMD kindly sent me.

  b) drm prime/dma-buf interface code. This is just infrastructure code
     to expose the dma-buf stuff to drm drivers and to userspace. I'm
     not planning on pushing any driver support in this cycle (except
     maybe exynos), but I'd like to get the infrastructure code in so
     that for the next cycle I can start getting the driver support into
     the individual drivers. We have started driver support for i915,
     nouveau and udl, along with, I think, exynos and omap in staging.
     However, this code relies on the dma-buf tree being pulled into
     your tree first, since it needs the latest interfaces from that
     tree. I'll push to get that tree sent asap.

  (oh, and any warnings you see in i915 are gcc's fault from what anyone
  can see)"

Fix up trivial conflicts in arch/x86/platform/mrst/mrst.c due to the new
msic_thermal_platform_data() thermal function being added next to the
tc35876x_platform_data() i2c device function.

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (326 commits)
  drm/i915: use DDC_ADDR instead of hard-coding it
  drm/radeon: use DDC_ADDR instead of hard-coding it
  drm: remove unneeded redefinition of DDC_ADDR
  drm/exynos: added virtual display driver.
  drm: allow loading an EDID as firmware to override broken monitor
  drm/exynos: enable hdmi audio feature
  drm/exynos: add default pixel format for plane
  drm/exynos: cleanup exynos_hdmi.h
  drm/exynos: add is_local member in exynos_drm_subdrv struct
  drm/exynos: add subdrv open/close functions
  drm/exynos: remove module of exynos drm subdrv
  drm/exynos: release pending pageflip events when closed
  drm/exynos: added new funtion to get/put dma address.
  drm/exynos: update gem and buffer framework.
  drm/exynos: added mode_fixup feature and code clean.
  drm/exynos: add HDMI version 1.4 support
  drm/exynos: remove exynos_mixer.h
  gma500: Fix mmap frambuffer
  drm/radeon: Drop radeon_gem_object_(un)pin.
  drm/radeon: Restrict offset for legacy display engine.
  ...
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
        ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
                                            sizeof(*ttm->dma_address));
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        int ret = 0;

        if (PageHighMem(p))
                return 0;

        if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */

                ret = set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }

        if (c_new == tt_wc)
                ret = set_memory_wc((unsigned long) page_address(p), 1);
        else if (c_new == tt_uncached)
                ret = set_pages_uc(p, 1);

        return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (ttm->state == tt_unpopulated) {
                /* Change caching but don't populate */
                ttm->caching_state = c_state;
                return 0;
        }

        if (ttm->caching_state == tt_cached)
                drm_clflush_pages(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state,
                                                      c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }

        return ret;
}

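/**
 * ttm_tt_set_placement_caching - apply a placement's caching flags to a ttm.
 * @ttm: The struct ttm_tt.
 * @placement: Placement flags; TTM_PL_FLAG_WC and TTM_PL_FLAG_UNCACHED are
 * honoured, anything else means cached.
 *
 * Maps the placement flags to a ttm_caching_state and switches the ttm's
 * pages to that state. Returns 0 on success or a negative error code if
 * changing the caching attributes fails.
 */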
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

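/**
 * ttm_tt_destroy - tear down a ttm.
 * @ttm: The struct ttm_tt; NULL is silently ignored.
 *
 * Unbinds the ttm if it is bound, has the driver unpopulate its pages,
 * drops any non-persistent swap storage and finally calls the backend's
 * destroy() hook.
 */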
void ttm_tt_destroy(struct ttm_tt *ttm)
{
        if (unlikely(ttm == NULL))
                return;

        if (ttm->state == tt_bound) {
                ttm_tt_unbind(ttm);
        }

        if (likely(ttm->pages != NULL)) {
                ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);

        ttm->swap_storage = NULL;
        ttm->func->destroy(ttm);
}

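/**
 * ttm_tt_init - initialize a ttm for a buffer object.
 * @ttm: The struct ttm_tt to initialize.
 * @bdev: The buffer object device.
 * @size: Size of the data area, in bytes; rounded up to whole pages.
 * @page_flags: TTM_PAGE_FLAG_* flags for this ttm.
 * @dummy_read_page: The dummy read page to associate with this ttm.
 *
 * Sets up the ttm in the cached, unpopulated state and allocates its page
 * directory. Returns -ENOMEM if the page directory cannot be allocated,
 * 0 otherwise.
 */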
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                unsigned long size, uint32_t page_flags,
                struct page *dummy_read_page)
{
        ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;

        ttm_tt_alloc_page_directory(ttm);
        if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

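/**
 * ttm_dma_tt_init - initialize a ttm_dma_tt for a buffer object.
 * @ttm_dma: The struct ttm_dma_tt to initialize.
 * @bdev: The buffer object device.
 * @size: Size of the data area, in bytes; rounded up to whole pages.
 * @page_flags: TTM_PAGE_FLAG_* flags for this ttm.
 * @dummy_read_page: The dummy read page to associate with this ttm.
 *
 * Like ttm_tt_init(), but also allocates the dma_address array for drivers
 * that track the DMA address of each backing page. Returns -ENOMEM if
 * either allocation fails, 0 otherwise.
 */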
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
                    unsigned long size, uint32_t page_flags,
                    struct page *dummy_read_page)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        ttm_dma_tt_alloc_page_directory(ttm_dma);
        if (!ttm->pages || !ttm_dma->dma_address) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        drm_free_large(ttm->pages);
        ttm->pages = NULL;
        drm_free_large(ttm_dma->dma_address);
        ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

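/*
 * Unbind a ttm from the GPU through the backend's unbind() hook if it is
 * currently bound, moving it to the tt_unbound state.
 */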
void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;

        if (ttm->state == tt_bound) {
                ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}

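/**
 * ttm_tt_bind - populate and bind a ttm to a memory region.
 * @ttm: The struct ttm_tt.
 * @bo_mem: The memory region to bind to.
 *
 * Ensures the backing pages are populated through the driver's
 * ttm_tt_populate() hook, then binds the ttm with the backend's bind()
 * hook and moves it to the tt_bound state. Returns 0 on success or a
 * negative error code.
 */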
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        int ret = 0;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
                return ret;

        ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;

        ttm->state = tt_bound;

        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

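/*
 * Swap a ttm back in: copy each page from the shmem swap storage into the
 * corresponding (already allocated) ttm page, then drop the swap storage
 * unless it is persistent and clear TTM_PAGE_FLAG_SWAPPED.
 */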
int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        void *from_virtual;
        void *to_virtual;
        int i;
        int ret = -ENOMEM;

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = shmem_read_mapping_page(swap_space, i);
                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL))
                        goto out_err;

                preempt_disable();
                from_virtual = kmap_atomic(from_page);
                to_virtual = kmap_atomic(to_page);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
                kunmap_atomic(to_virtual);
                kunmap_atomic(from_virtual);
                preempt_enable();
                page_cache_release(from_page);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
                fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;
out_err:
        return ret;
}

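/*
 * Swap a ttm out: copy each ttm page into swap storage (the given
 * persistent file, or a newly created shmem file), then unpopulate the
 * ttm and mark it TTM_PAGE_FLAG_SWAPPED.
 */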
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        void *from_virtual;
        void *to_virtual;
        int i;
        int ret = -ENOMEM;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        if (!persistent_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (unlikely(IS_ERR(swap_storage))) {
                        pr_err("Failed allocating swap storage\n");
                        return PTR_ERR(swap_storage);
                }
        } else
                swap_storage = persistent_swap_storage;

        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
                to_page = shmem_read_mapping_page(swap_space, i);
                if (unlikely(IS_ERR(to_page))) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
                }
                preempt_disable();
                from_virtual = kmap_atomic(from_page);
                to_virtual = kmap_atomic(to_page);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
                kunmap_atomic(to_virtual);
                kunmap_atomic(from_virtual);
                preempt_enable();
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                page_cache_release(to_page);
        }

        ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

        return 0;
out_err:
        if (!persistent_swap_storage)
                fput(swap_storage);

        return ret;
}