Merge branch 'drm-anon' into drm-next
commit afab4463ac
@@ -259,7 +259,9 @@ int ast_mm_init(struct ast_private *ast)
 
         ret = ttm_bo_device_init(&ast->ttm.bdev,
                 ast->ttm.bo_global_ref.ref.object,
-                &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
+                &ast_bo_driver,
+                dev->anon_inode->i_mapping,
+                DRM_FILE_PAGE_OFFSET,
                 true);
         if (ret) {
                 DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -324,7 +326,6 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
         }
 
         astbo->bo.bdev = &ast->ttm.bdev;
-        astbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
         ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
@@ -225,7 +225,9 @@ int bochs_mm_init(struct bochs_device *bochs)
 
         ret = ttm_bo_device_init(&bochs->ttm.bdev,
                 bochs->ttm.bo_global_ref.ref.object,
-                &bochs_bo_driver, DRM_FILE_PAGE_OFFSET,
+                &bochs_bo_driver,
+                bochs->dev->anon_inode->i_mapping,
+                DRM_FILE_PAGE_OFFSET,
                 true);
         if (ret) {
                 DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -359,7 +361,7 @@ static int bochs_bo_create(struct drm_device *dev, int size, int align,
         }
 
         bochsbo->bo.bdev = &bochs->ttm.bdev;
-        bochsbo->bo.bdev->dev_mapping = dev->dev_mapping;
+        bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping;
 
         bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
@@ -259,7 +259,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
 
         ret = ttm_bo_device_init(&cirrus->ttm.bdev,
                 cirrus->ttm.bo_global_ref.ref.object,
-                &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET,
+                &cirrus_bo_driver,
+                dev->anon_inode->i_mapping,
+                DRM_FILE_PAGE_OFFSET,
                 true);
         if (ret) {
                 DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -329,7 +331,6 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
         }
 
         cirrusbo->bo.bdev = &cirrus->ttm.bdev;
-        cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
         cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
@@ -84,8 +84,6 @@ int drm_open(struct inode *inode, struct file *filp)
         struct drm_minor *minor;
         int retcode = 0;
         int need_setup = 0;
-        struct address_space *old_mapping;
-        struct address_space *old_imapping;
 
         minor = idr_find(&drm_minors_idr, minor_id);
         if (!minor)
@@ -99,16 +97,9 @@ int drm_open(struct inode *inode, struct file *filp)
 
         if (!dev->open_count++)
                 need_setup = 1;
-        mutex_lock(&dev->struct_mutex);
-        old_imapping = inode->i_mapping;
-        old_mapping = dev->dev_mapping;
-        if (old_mapping == NULL)
-                dev->dev_mapping = &inode->i_data;
-        /* ihold ensures nobody can remove inode with our i_data */
-        ihold(container_of(dev->dev_mapping, struct inode, i_data));
-        inode->i_mapping = dev->dev_mapping;
-        filp->f_mapping = dev->dev_mapping;
-        mutex_unlock(&dev->struct_mutex);
+
+        /* share address_space across all char-devs of a single device */
+        filp->f_mapping = dev->anon_inode->i_mapping;
 
         retcode = drm_open_helper(inode, filp, dev);
         if (retcode)
@@ -121,12 +112,6 @@ int drm_open(struct inode *inode, struct file *filp)
         return 0;
 
 err_undo:
-        mutex_lock(&dev->struct_mutex);
-        filp->f_mapping = old_imapping;
-        inode->i_mapping = old_imapping;
-        iput(container_of(dev->dev_mapping, struct inode, i_data));
-        dev->dev_mapping = old_mapping;
-        mutex_unlock(&dev->struct_mutex);
         dev->open_count--;
         return retcode;
 }
@@ -434,7 +419,6 @@ int drm_lastclose(struct drm_device * dev)
 
         drm_legacy_dma_takedown(dev);
 
-        dev->dev_mapping = NULL;
         mutex_unlock(&dev->struct_mutex);
 
         drm_legacy_dev_reinit(dev);
@@ -549,9 +533,6 @@ int drm_release(struct inode *inode, struct file *filp)
                 }
         }
 
-        BUG_ON(dev->dev_mapping == NULL);
-        iput(container_of(dev->dev_mapping, struct inode, i_data));
-
         /* drop the reference held my the file priv */
         if (file_priv->master)
                 drm_master_put(&file_priv->master);
@@ -31,8 +31,10 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/fs.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/mount.h>
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/drm_core.h>
@@ -416,6 +418,78 @@ void drm_unplug_dev(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_unplug_dev);
 
+/*
+ * DRM internal mount
+ * We want to be able to allocate our own "struct address_space" to control
+ * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
+ * stand-alone address_space objects, so we need an underlying inode. As there
+ * is no way to allocate an independent inode easily, we need a fake internal
+ * VFS mount-point.
+ *
+ * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
+ * frees it again. You are allowed to use iget() and iput() to get references to
+ * the inode. But each drm_fs_inode_new() call must be paired with exactly one
+ * drm_fs_inode_free() call (which does not have to be the last iput()).
+ * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
+ * between multiple inode-users. You could, technically, call
+ * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
+ * iput(), but this way you'd end up with a new vfsmount for each inode.
+ */
+
+static int drm_fs_cnt;
+static struct vfsmount *drm_fs_mnt;
+
+static const struct dentry_operations drm_fs_dops = {
+        .d_dname = simple_dname,
+};
+
+static const struct super_operations drm_fs_sops = {
+        .statfs = simple_statfs,
+};
+
+static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
+                const char *dev_name, void *data)
+{
+        return mount_pseudo(fs_type,
+                "drm:",
+                &drm_fs_sops,
+                &drm_fs_dops,
+                0x010203ff);
+}
+
+static struct file_system_type drm_fs_type = {
+        .name = "drm",
+        .owner = THIS_MODULE,
+        .mount = drm_fs_mount,
+        .kill_sb = kill_anon_super,
+};
+
+static struct inode *drm_fs_inode_new(void)
+{
+        struct inode *inode;
+        int r;
+
+        r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
+        if (r < 0) {
+                DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
+                return ERR_PTR(r);
+        }
+
+        inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
+        if (IS_ERR(inode))
+                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+
+        return inode;
+}
+
+static void drm_fs_inode_free(struct inode *inode)
+{
+        if (inode) {
+                iput(inode);
+                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+        }
+}
+
 /**
  * drm_dev_alloc - Allocate new drm device
  * @driver: DRM driver to allocate device for
@@ -452,8 +526,15 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
         mutex_init(&dev->struct_mutex);
         mutex_init(&dev->ctxlist_mutex);
 
-        if (drm_ht_create(&dev->map_hash, 12))
+        dev->anon_inode = drm_fs_inode_new();
+        if (IS_ERR(dev->anon_inode)) {
+                ret = PTR_ERR(dev->anon_inode);
+                DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
                 goto err_free;
+        }
+
+        if (drm_ht_create(&dev->map_hash, 12))
+                goto err_inode;
 
         ret = drm_ctxbitmap_init(dev);
         if (ret) {
@@ -475,6 +556,8 @@ err_ctxbitmap:
         drm_ctxbitmap_cleanup(dev);
 err_ht:
         drm_ht_remove(&dev->map_hash);
+err_inode:
+        drm_fs_inode_free(dev->anon_inode);
 err_free:
         kfree(dev);
         return NULL;
@@ -502,6 +585,7 @@ void drm_dev_free(struct drm_device *dev)
 
         drm_ctxbitmap_cleanup(dev);
         drm_ht_remove(&dev->map_hash);
+        drm_fs_inode_free(dev->anon_inode);
 
         kfree(dev->devname);
         kfree(dev);
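
Taken together, the drm_stub.c hunks above give each DRM device one shared anonymous address_space, and the driver hunks that follow switch every former user of dev->dev_mapping over to it. Condensed into one place, the pattern looks roughly like the following sketch, assembled only from identifiers in this diff, with error paths and unrelated setup elided:

        /* device allocation: create the shared per-device address_space */
        dev->anon_inode = drm_fs_inode_new();   /* pins the internal "drm" pseudo mount */
        if (IS_ERR(dev->anon_inode))
                return PTR_ERR(dev->anon_inode);        /* the real code jumps to err_free */

        /* every open() of any minor of this device now shares the same mapping */
        filp->f_mapping = dev->anon_inode->i_mapping;

        /* device teardown: exactly one drm_fs_inode_free() per drm_fs_inode_new() */
        drm_fs_inode_free(dev->anon_inode);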
@@ -1508,7 +1508,8 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
         if (!obj->fault_mappable)
                 return;
 
-        drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
+        drm_vma_node_unmap(&obj->base.vma_node,
+                obj->base.dev->anon_inode->i_mapping);
         obj->fault_mappable = false;
 }
 
@@ -259,7 +259,9 @@ int mgag200_mm_init(struct mga_device *mdev)
 
         ret = ttm_bo_device_init(&mdev->ttm.bdev,
                 mdev->ttm.bo_global_ref.ref.object,
-                &mgag200_bo_driver, DRM_FILE_PAGE_OFFSET,
+                &mgag200_bo_driver,
+                dev->anon_inode->i_mapping,
+                DRM_FILE_PAGE_OFFSET,
                 true);
         if (ret) {
                 DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -324,7 +326,6 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
         }
 
         mgabo->bo.bdev = &mdev->ttm.bdev;
-        mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
 
         mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
 
@@ -228,8 +228,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
         struct nouveau_bo *nvbo = NULL;
         int ret = 0;
 
-        drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
-
         if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
                 NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
                 return -EINVAL;
@@ -376,7 +376,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 
         ret = ttm_bo_device_init(&drm->ttm.bdev,
                 drm->ttm.bo_global_ref.ref.object,
-                &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
+                &nouveau_bo_driver,
+                dev->anon_inode->i_mapping,
+                DRM_FILE_PAGE_OFFSET,
                 bits <= 32 ? true : false);
         if (ret) {
                 NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
@@ -153,24 +153,24 @@ static struct {
 static void evict_entry(struct drm_gem_object *obj,
                 enum tiler_fmt fmt, struct usergart_entry *entry)
 {
-        if (obj->dev->dev_mapping) {
-                struct omap_gem_object *omap_obj = to_omap_bo(obj);
-                int n = usergart[fmt].height;
-                size_t size = PAGE_SIZE * n;
-                loff_t off = mmap_offset(obj) +
-                                (entry->obj_pgoff << PAGE_SHIFT);
-                const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
-                if (m > 1) {
-                        int i;
-                        /* if stride > than PAGE_SIZE then sparse mapping: */
-                        for (i = n; i > 0; i--) {
-                                unmap_mapping_range(obj->dev->dev_mapping,
-                                                off, PAGE_SIZE, 1);
-                                off += PAGE_SIZE * m;
-                        }
-                } else {
-                        unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
-                }
+        struct omap_gem_object *omap_obj = to_omap_bo(obj);
+        int n = usergart[fmt].height;
+        size_t size = PAGE_SIZE * n;
+        loff_t off = mmap_offset(obj) +
+                        (entry->obj_pgoff << PAGE_SHIFT);
+        const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+
+        if (m > 1) {
+                int i;
+                /* if stride > than PAGE_SIZE then sparse mapping: */
+                for (i = n; i > 0; i--) {
+                        unmap_mapping_range(obj->dev->anon_inode->i_mapping,
+                                        off, PAGE_SIZE, 1);
+                        off += PAGE_SIZE * m;
+                }
+        } else {
+                unmap_mapping_range(obj->dev->anon_inode->i_mapping,
+                                off, size, 1);
         }
 
         entry->obj = NULL;
@@ -82,8 +82,6 @@ int qxl_bo_create(struct qxl_device *qdev,
         enum ttm_bo_type type;
         int r;
 
-        if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
-                qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
         if (kernel)
                 type = ttm_bo_type_kernel;
         else
@@ -493,7 +493,9 @@ int qxl_ttm_init(struct qxl_device *qdev)
         /* No others user of address space so set it to 0 */
         r = ttm_bo_device_init(&qdev->mman.bdev,
                 qdev->mman.bo_global_ref.ref.object,
-                &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0);
+                &qxl_bo_driver,
+                qdev->ddev->anon_inode->i_mapping,
+                DRM_FILE_PAGE_OFFSET, 0);
         if (r) {
                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                 return r;
@@ -518,8 +520,6 @@ int qxl_ttm_init(struct qxl_device *qdev)
                 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
         DRM_INFO("qxl: %uM of Surface memory size\n",
                 (unsigned)qdev->surfaceram_size / (1024 * 1024));
-        if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
-                qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
         r = qxl_ttm_debugfs_init(qdev);
         if (r) {
                 DRM_ERROR("Failed to init debugfs\n");
@@ -145,7 +145,6 @@ int radeon_bo_create(struct radeon_device *rdev,
 
         size = ALIGN(size, PAGE_SIZE);
 
-        rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
         if (kernel) {
                 type = ttm_bo_type_kernel;
         } else if (sg) {
@@ -707,7 +707,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
         /* No others user of address space so set it to 0 */
         r = ttm_bo_device_init(&rdev->mman.bdev,
                 rdev->mman.bo_global_ref.ref.object,
-                &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
+                &radeon_bo_driver,
+                rdev->ddev->anon_inode->i_mapping,
+                DRM_FILE_PAGE_OFFSET,
                 rdev->need_dma32);
         if (r) {
                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -745,7 +747,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
         }
         DRM_INFO("radeon: %uM of GTT memory ready.\n",
                 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
-        rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
 
         r = radeon_ttm_debugfs_init(rdev);
         if (r) {
@@ -1449,6 +1449,7 @@ EXPORT_SYMBOL(ttm_bo_device_release);
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
                 struct ttm_bo_global *glob,
                 struct ttm_bo_driver *driver,
+                struct address_space *mapping,
                 uint64_t file_page_offset,
                 bool need_dma32)
 {
@@ -1470,7 +1471,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
                 0x10000000);
         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
         INIT_LIST_HEAD(&bdev->ddestroy);
-        bdev->dev_mapping = NULL;
+        bdev->dev_mapping = mapping;
         bdev->glob = glob;
         bdev->need_dma32 = need_dma32;
         bdev->val_seq = 0;
@@ -722,7 +722,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
         ret = ttm_bo_device_init(&dev_priv->bdev,
                 dev_priv->bo_global_ref.ref.object,
-                &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
+                &vmw_bo_driver,
+                dev->anon_inode->i_mapping,
+                VMWGFX_FILE_PAGE_OFFSET,
                 false);
         if (unlikely(ret != 0)) {
                 DRM_ERROR("Failed initializing TTM buffer object driver.\n");
@@ -969,7 +971,6 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
                 goto out_no_shman;
 
         file_priv->driver_priv = vmw_fp;
-        dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
         return 0;
 
@@ -3112,6 +3112,7 @@ char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
                 end = ERR_PTR(-ENAMETOOLONG);
         return end;
 }
+EXPORT_SYMBOL(simple_dname);
 
 /*
  * Write full pathname from the root of the filesystem into the buffer.
@@ -1183,7 +1183,7 @@ struct drm_device {
         struct drm_sg_mem *sg;  /**< Scatter gather memory */
         unsigned int num_crtcs; /**< Number of CRTCs on this device */
         void *dev_private;      /**< device private data */
-        struct address_space *dev_mapping;
+        struct inode *anon_inode;
         struct drm_sigdata sigdata;     /**< For block_all_signals */
         sigset_t sigmask;
 
@@ -221,8 +221,8 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
  * @file_mapping: Address space to unmap @node from
  *
  * Unmap all userspace mappings for a given offset node. The mappings must be
- * associated with the @file_mapping address-space. If no offset exists or
- * the address-space is invalid, nothing is done.
+ * associated with the @file_mapping address-space. If no offset exists
+ * nothing is done.
  *
  * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
 * is not called on this node concurrently.
@@ -230,7 +230,7 @@ static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
 static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
                 struct address_space *file_mapping)
 {
-        if (file_mapping && drm_vma_node_has_offset(node))
+        if (drm_vma_node_has_offset(node))
                 unmap_mapping_range(file_mapping,
                         drm_vma_node_offset_addr(node),
                         drm_vma_node_size(node) << PAGE_SHIFT, 1);
@@ -747,6 +747,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
  * @bdev: A pointer to a struct ttm_bo_device to initialize.
  * @glob: A pointer to an initialized struct ttm_bo_global.
  * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @mapping: The address space to use for this bo.
  * @file_page_offset: Offset into the device address space that is available
  * for buffer data. This ensures compatibility with other users of the
  * address space.
@@ -758,6 +759,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
 extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
                 struct ttm_bo_global *glob,
                 struct ttm_bo_driver *driver,
+                struct address_space *mapping,
                 uint64_t file_page_offset, bool need_dma32);
 
 /**
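
For TTM-based drivers the net effect of the new ttm_bo_device_init() signature is that the shared mapping is handed over at init time instead of being patched into bdev->dev_mapping afterwards. A converted caller looks roughly like the driver hunks above; in this sketch the driver name, bo-driver struct and page-offset constant are placeholders, not identifiers from this diff:

        ret = ttm_bo_device_init(&priv->ttm.bdev,
                priv->ttm.bo_global_ref.ref.object,
                &foo_bo_driver,                 /* hypothetical struct ttm_bo_driver */
                dev->anon_inode->i_mapping,     /* new: shared per-device address_space */
                FOO_FILE_PAGE_OFFSET,           /* hypothetical mmap offset constant */
                true);                          /* need_dma32 */
        if (ret)
                DRM_ERROR("Error initialising bo driver; %d\n", ret);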