drm/vmwgfx: Switch to a managed drm device
To clean up some of the error handling and prepare for some other work, let's switch to a managed drm device. It will let us get a better handle on some of the error paths. Signed-off-by: Zack Rusin <zackr@vmware.com> Reviewed-by: Martin Krastev <krastevm@vmware.com> Reviewed-by: Roland Scheidegger <sroland@vmware.com> Link: https://patchwork.freedesktop.org/patch/414039/?series=85516&rev=2
This commit is contained in:
parent
31856c8c1c
commit
9703bb3292
@ -1230,7 +1230,7 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
|
||||
|
||||
/* First, try to allocate a huge chunk of DMA memory */
|
||||
size = PAGE_ALIGN(size);
|
||||
man->map = dma_alloc_coherent(dev_priv->dev->dev, size,
|
||||
man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
|
||||
&man->handle, GFP_KERNEL);
|
||||
if (man->map) {
|
||||
man->using_mob = false;
|
||||
@ -1313,7 +1313,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
|
||||
man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
|
||||
2 : 1;
|
||||
man->headers = dma_pool_create("vmwgfx cmdbuf",
|
||||
dev_priv->dev->dev,
|
||||
dev_priv->drm.dev,
|
||||
sizeof(SVGACBHeader),
|
||||
64, PAGE_SIZE);
|
||||
if (!man->headers) {
|
||||
@ -1322,7 +1322,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
|
||||
}
|
||||
|
||||
man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
|
||||
dev_priv->dev->dev,
|
||||
dev_priv->drm.dev,
|
||||
sizeof(struct vmw_cmdbuf_dheader),
|
||||
64, PAGE_SIZE);
|
||||
if (!man->dheaders) {
|
||||
@ -1387,7 +1387,7 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
|
||||
ttm_bo_put(man->cmd_space);
|
||||
man->cmd_space = NULL;
|
||||
} else {
|
||||
dma_free_coherent(man->dev_priv->dev->dev,
|
||||
dma_free_coherent(man->dev_priv->drm.dev,
|
||||
man->size, man->map, man->handle);
|
||||
}
|
||||
}
|
||||
|
@ -609,7 +609,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
|
||||
*/
|
||||
static int vmw_dma_masks(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int ret = 0;
|
||||
|
||||
ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
|
||||
@ -644,25 +644,17 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
|
||||
#endif
|
||||
}
|
||||
|
||||
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||
static int vmw_driver_load(struct vmw_private *dev_priv, unsigned long chipset)
|
||||
{
|
||||
struct vmw_private *dev_priv;
|
||||
int ret;
|
||||
uint32_t svga_id;
|
||||
enum vmw_res_type i;
|
||||
bool refuse_dma = false;
|
||||
char host_log[100] = {0};
|
||||
struct pci_dev *pdev = to_pci_dev(dev->dev);
|
||||
|
||||
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
|
||||
if (unlikely(!dev_priv)) {
|
||||
DRM_ERROR("Failed allocating a device private struct.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
||||
dev_priv->dev = dev;
|
||||
dev_priv->vmw_chipset = chipset;
|
||||
dev_priv->last_read_seqno = (uint32_t) -100;
|
||||
mutex_init(&dev_priv->cmdbuf_mutex);
|
||||
@ -795,7 +787,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||
if (unlikely(ret != 0))
|
||||
goto out_err0;
|
||||
|
||||
dma_set_max_seg_size(dev->dev, U32_MAX);
|
||||
dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);
|
||||
|
||||
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
|
||||
DRM_INFO("Max GMR ids is %u\n",
|
||||
@ -839,7 +831,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||
goto out_err4;
|
||||
}
|
||||
|
||||
dev->dev_private = dev_priv;
|
||||
dev_priv->drm.dev_private = dev_priv;
|
||||
|
||||
ret = pci_request_regions(pdev, "vmwgfx probe");
|
||||
if (ret) {
|
||||
@ -848,7 +840,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||
}
|
||||
|
||||
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
|
||||
ret = vmw_irq_install(dev, pdev->irq);
|
||||
ret = vmw_irq_install(&dev_priv->drm, pdev->irq);
|
||||
if (ret != 0) {
|
||||
DRM_ERROR("Failed installing irq: %d\n", ret);
|
||||
goto out_no_irq;
|
||||
@ -865,8 +857,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||
DRM_FILE_PAGE_OFFSET_START,
|
||||
DRM_FILE_PAGE_OFFSET_SIZE);
|
||||
ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
|
||||
dev_priv->dev->dev,
|
||||
dev->anon_inode->i_mapping,
|
||||
dev_priv->drm.dev,
|
||||
dev_priv->drm.anon_inode->i_mapping,
|
||||
&dev_priv->vma_manager,
|
||||
dev_priv->map_mode == vmw_dma_alloc_coherent,
|
||||
false);
|
||||
@ -946,7 +938,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||
if (ret)
|
||||
goto out_no_fifo;
|
||||
|
||||
DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
|
||||
DRM_INFO("Atomic: %s\n", (dev_priv->drm.driver->driver_features & DRIVER_ATOMIC)
|
||||
? "yes." : "no.");
|
||||
if (dev_priv->sm_type == VMW_SM_5)
|
||||
DRM_INFO("SM5 support available.\n");
|
||||
@ -991,7 +983,7 @@ out_no_bdev:
|
||||
vmw_fence_manager_takedown(dev_priv->fman);
|
||||
out_no_fman:
|
||||
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
|
||||
vmw_irq_uninstall(dev_priv->dev);
|
||||
vmw_irq_uninstall(&dev_priv->drm);
|
||||
out_no_irq:
|
||||
pci_release_regions(pdev);
|
||||
out_no_device:
|
||||
@ -1041,7 +1033,7 @@ static void vmw_driver_unload(struct drm_device *dev)
|
||||
vmw_release_device_late(dev_priv);
|
||||
vmw_fence_manager_takedown(dev_priv->fman);
|
||||
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
|
||||
vmw_irq_uninstall(dev_priv->dev);
|
||||
vmw_irq_uninstall(&dev_priv->drm);
|
||||
pci_release_regions(pdev);
|
||||
|
||||
ttm_object_device_release(&dev_priv->tdev);
|
||||
@ -1239,7 +1231,7 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
|
||||
* to be inconsistent with the device, causing modesetting problems.
|
||||
*
|
||||
*/
|
||||
vmw_kms_lost_device(dev_priv->dev);
|
||||
vmw_kms_lost_device(&dev_priv->drm);
|
||||
ttm_write_lock(&dev_priv->reservation_sem, false);
|
||||
spin_lock(&dev_priv->svga_lock);
|
||||
if (ttm_resource_manager_used(man)) {
|
||||
@ -1261,8 +1253,6 @@ static void vmw_remove(struct pci_dev *pdev)
|
||||
|
||||
drm_dev_unregister(dev);
|
||||
vmw_driver_unload(dev);
|
||||
drm_dev_put(dev);
|
||||
pci_disable_device(pdev);
|
||||
}
|
||||
|
||||
static unsigned long
|
||||
@ -1363,7 +1353,7 @@ static int vmw_pm_freeze(struct device *kdev)
|
||||
* No user-space processes should be running now.
|
||||
*/
|
||||
ttm_suspend_unlock(&dev_priv->reservation_sem);
|
||||
ret = vmw_kms_suspend(dev_priv->dev);
|
||||
ret = vmw_kms_suspend(&dev_priv->drm);
|
||||
if (ret) {
|
||||
ttm_suspend_lock(&dev_priv->reservation_sem);
|
||||
DRM_ERROR("Failed to freeze modesetting.\n");
|
||||
@ -1424,7 +1414,7 @@ static int vmw_pm_restore(struct device *kdev)
|
||||
dev_priv->suspend_locked = false;
|
||||
ttm_suspend_unlock(&dev_priv->reservation_sem);
|
||||
if (dev_priv->suspend_state)
|
||||
vmw_kms_resume(dev_priv->dev);
|
||||
vmw_kms_resume(&dev_priv->drm);
|
||||
|
||||
if (dev_priv->enable_fb)
|
||||
vmw_fb_on(dev_priv);
|
||||
@ -1493,42 +1483,36 @@ static struct pci_driver vmw_pci_driver = {
|
||||
|
||||
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
{
|
||||
struct drm_device *dev;
|
||||
struct vmw_private *vmw;
|
||||
int ret;
|
||||
|
||||
ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = pci_enable_device(pdev);
|
||||
ret = pcim_enable_device(pdev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dev = drm_dev_alloc(&driver, &pdev->dev);
|
||||
if (IS_ERR(dev)) {
|
||||
ret = PTR_ERR(dev);
|
||||
goto err_pci_disable_device;
|
||||
vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
|
||||
struct vmw_private, drm);
|
||||
if (IS_ERR(vmw))
|
||||
return PTR_ERR(vmw);
|
||||
|
||||
vmw->drm.pdev = pdev;
|
||||
pci_set_drvdata(pdev, &vmw->drm);
|
||||
|
||||
ret = vmw_driver_load(vmw, ent->device);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_dev_register(&vmw->drm, 0);
|
||||
if (ret) {
|
||||
vmw_driver_unload(&vmw->drm);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pci_set_drvdata(pdev, dev);
|
||||
|
||||
ret = vmw_driver_load(dev, ent->driver_data);
|
||||
if (ret)
|
||||
goto err_drm_dev_put;
|
||||
|
||||
ret = drm_dev_register(dev, ent->driver_data);
|
||||
if (ret)
|
||||
goto err_vmw_driver_unload;
|
||||
|
||||
return 0;
|
||||
|
||||
err_vmw_driver_unload:
|
||||
vmw_driver_unload(dev);
|
||||
err_drm_dev_put:
|
||||
drm_dev_put(dev);
|
||||
err_pci_disable_device:
|
||||
pci_disable_device(pdev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __init vmwgfx_init(void)
|
||||
|
@ -489,11 +489,11 @@ enum vmw_sm_type {
|
||||
};
|
||||
|
||||
struct vmw_private {
|
||||
struct drm_device drm;
|
||||
struct ttm_bo_device bdev;
|
||||
|
||||
struct vmw_fifo_state fifo;
|
||||
|
||||
struct drm_device *dev;
|
||||
struct drm_vma_offset_manager vma_manager;
|
||||
unsigned long vmw_chipset;
|
||||
unsigned int io_start;
|
||||
|
@ -481,7 +481,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
|
||||
DRM_ERROR("Could not unset a mode.\n");
|
||||
return ret;
|
||||
}
|
||||
drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
|
||||
drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
|
||||
par->set_mode = NULL;
|
||||
}
|
||||
|
||||
@ -567,7 +567,7 @@ static int vmw_fb_set_par(struct fb_info *info)
|
||||
struct drm_display_mode *mode;
|
||||
int ret;
|
||||
|
||||
mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
|
||||
mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
|
||||
if (!mode) {
|
||||
DRM_ERROR("Could not create new fb mode.\n");
|
||||
return -ENOMEM;
|
||||
@ -581,7 +581,7 @@ static int vmw_fb_set_par(struct fb_info *info)
|
||||
mode->hdisplay *
|
||||
DIV_ROUND_UP(var->bits_per_pixel, 8),
|
||||
mode->vdisplay)) {
|
||||
drm_mode_destroy(vmw_priv->dev, mode);
|
||||
drm_mode_destroy(&vmw_priv->drm, mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -615,7 +615,7 @@ static int vmw_fb_set_par(struct fb_info *info)
|
||||
|
||||
out_unlock:
|
||||
if (par->set_mode)
|
||||
drm_mode_destroy(vmw_priv->dev, par->set_mode);
|
||||
drm_mode_destroy(&vmw_priv->drm, par->set_mode);
|
||||
par->set_mode = mode;
|
||||
|
||||
mutex_unlock(&par->bo_mutex);
|
||||
@ -638,7 +638,7 @@ static const struct fb_ops vmw_fb_ops = {
|
||||
|
||||
int vmw_fb_init(struct vmw_private *vmw_priv)
|
||||
{
|
||||
struct device *device = vmw_priv->dev->dev;
|
||||
struct device *device = vmw_priv->drm.dev;
|
||||
struct vmw_fb_par *par;
|
||||
struct fb_info *info;
|
||||
unsigned fb_width, fb_height;
|
||||
|
@ -1033,7 +1033,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
|
||||
eaction->action.type = VMW_ACTION_EVENT;
|
||||
|
||||
eaction->fence = vmw_fence_obj_reference(fence);
|
||||
eaction->dev = fman->dev_priv->dev;
|
||||
eaction->dev = &fman->dev_priv->drm;
|
||||
eaction->tv_sec = tv_sec;
|
||||
eaction->tv_usec = tv_usec;
|
||||
|
||||
@ -1055,7 +1055,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
|
||||
{
|
||||
struct vmw_event_fence_pending *event;
|
||||
struct vmw_fence_manager *fman = fman_from_fence(fence);
|
||||
struct drm_device *dev = fman->dev_priv->dev;
|
||||
struct drm_device *dev = &fman->dev_priv->drm;
|
||||
int ret;
|
||||
|
||||
event = kzalloc(sizeof(*event), GFP_KERNEL);
|
||||
|
@ -236,7 +236,7 @@ err_unreserve:
|
||||
*/
|
||||
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct vmw_display_unit *du;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
@ -252,7 +252,7 @@ void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
|
||||
|
||||
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct vmw_display_unit *du;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
@ -891,7 +891,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
|
||||
bool is_bo_proxy)
|
||||
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct vmw_framebuffer_surface *vfbs;
|
||||
enum SVGA3dSurfaceFormat format;
|
||||
int ret;
|
||||
@ -1003,11 +1003,11 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
|
||||
struct drm_clip_rect norect;
|
||||
int ret, increment = 1;
|
||||
|
||||
drm_modeset_lock_all(dev_priv->dev);
|
||||
drm_modeset_lock_all(&dev_priv->drm);
|
||||
|
||||
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
|
||||
if (unlikely(ret != 0)) {
|
||||
drm_modeset_unlock_all(dev_priv->dev);
|
||||
drm_modeset_unlock_all(&dev_priv->drm);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1036,7 +1036,7 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
|
||||
vmw_fifo_flush(dev_priv, false);
|
||||
ttm_read_unlock(&dev_priv->reservation_sem);
|
||||
|
||||
drm_modeset_unlock_all(dev_priv->dev);
|
||||
drm_modeset_unlock_all(&dev_priv->drm);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -1213,7 +1213,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
|
||||
*mode_cmd)
|
||||
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct vmw_framebuffer_bo *vfbd;
|
||||
unsigned int requested_size;
|
||||
struct drm_format_name_buf format_name;
|
||||
@ -1319,7 +1319,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
|
||||
bo && only_2d &&
|
||||
mode_cmd->width > 64 && /* Don't create a proxy for cursor */
|
||||
dev_priv->active_display_unit == vmw_du_screen_target) {
|
||||
ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
|
||||
ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
|
||||
bo, &surface);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
@ -1780,7 +1780,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
|
||||
return;
|
||||
|
||||
dev_priv->hotplug_mode_update_property =
|
||||
drm_property_create_range(dev_priv->dev,
|
||||
drm_property_create_range(&dev_priv->drm,
|
||||
DRM_MODE_PROP_IMMUTABLE,
|
||||
"hotplug_mode_update", 0, 1);
|
||||
|
||||
@ -1791,7 +1791,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
|
||||
|
||||
int vmw_kms_init(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int ret;
|
||||
|
||||
drm_mode_config_init(dev);
|
||||
@ -1823,7 +1823,7 @@ int vmw_kms_close(struct vmw_private *dev_priv)
|
||||
* but since it destroys encoders and our destructor calls
|
||||
* drm_encoder_cleanup which takes the lock we deadlock.
|
||||
*/
|
||||
drm_mode_config_cleanup(dev_priv->dev);
|
||||
drm_mode_config_cleanup(&dev_priv->drm);
|
||||
if (dev_priv->active_display_unit == vmw_du_legacy)
|
||||
ret = vmw_kms_ldu_close_display(dev_priv);
|
||||
|
||||
@ -1934,7 +1934,7 @@ void vmw_disable_vblank(struct drm_crtc *crtc)
|
||||
static int vmw_du_update_layout(struct vmw_private *dev_priv,
|
||||
unsigned int num_rects, struct drm_rect *rects)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct vmw_display_unit *du;
|
||||
struct drm_connector *con;
|
||||
struct drm_connector_list_iter conn_iter;
|
||||
@ -2366,7 +2366,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
|
||||
if (dirty->crtc) {
|
||||
units[num_units++] = vmw_crtc_to_du(dirty->crtc);
|
||||
} else {
|
||||
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
|
||||
list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
|
||||
head) {
|
||||
struct drm_plane *plane = crtc->primary;
|
||||
|
||||
@ -2568,8 +2568,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
|
||||
int i = 0;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&dev_priv->dev->mode_config.mutex);
|
||||
list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
|
||||
mutex_lock(&dev_priv->drm.mode_config.mutex);
|
||||
list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
|
||||
head) {
|
||||
if (i == unit)
|
||||
break;
|
||||
@ -2577,7 +2577,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
|
||||
++i;
|
||||
}
|
||||
|
||||
if (&con->head == &dev_priv->dev->mode_config.connector_list) {
|
||||
if (&con->head == &dev_priv->drm.mode_config.connector_list) {
|
||||
DRM_ERROR("Could not find initial display unit.\n");
|
||||
ret = -EINVAL;
|
||||
goto out_unlock;
|
||||
@ -2611,7 +2611,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&dev_priv->dev->mode_config.mutex);
|
||||
mutex_unlock(&dev_priv->drm.mode_config.mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -2631,7 +2631,7 @@ vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
|
||||
return;
|
||||
|
||||
dev_priv->implicit_placement_property =
|
||||
drm_property_create_range(dev_priv->dev,
|
||||
drm_property_create_range(&dev_priv->drm,
|
||||
DRM_MODE_PROP_IMMUTABLE,
|
||||
"implicit_placement", 0, 1);
|
||||
}
|
||||
|
@ -355,7 +355,7 @@ static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
|
||||
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
|
||||
{
|
||||
struct vmw_legacy_display_unit *ldu;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_connector *connector;
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_plane *primary, *cursor;
|
||||
@ -479,7 +479,7 @@ err_free:
|
||||
|
||||
int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int i, ret;
|
||||
|
||||
if (dev_priv->ldu_priv) {
|
||||
|
@ -829,7 +829,7 @@ static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
|
||||
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
|
||||
{
|
||||
struct vmw_screen_object_unit *sou;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_connector *connector;
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_plane *primary, *cursor;
|
||||
@ -946,7 +946,7 @@ err_free:
|
||||
|
||||
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int i, ret;
|
||||
|
||||
if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
|
||||
|
@ -1713,7 +1713,7 @@ static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
|
||||
static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
|
||||
{
|
||||
struct vmw_screen_target_display_unit *stdu;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_connector *connector;
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_plane *primary, *cursor;
|
||||
@ -1861,7 +1861,7 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
|
||||
*/
|
||||
int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int i, ret;
|
||||
|
||||
|
||||
|
@ -309,7 +309,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
|
||||
*/
|
||||
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
|
||||
{
|
||||
struct device *dev = vmw_tt->dev_priv->dev->dev;
|
||||
struct device *dev = vmw_tt->dev_priv->drm.dev;
|
||||
|
||||
dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
|
||||
vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
|
||||
@ -330,7 +330,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
|
||||
*/
|
||||
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
|
||||
{
|
||||
struct device *dev = vmw_tt->dev_priv->dev->dev;
|
||||
struct device *dev = vmw_tt->dev_priv->drm.dev;
|
||||
|
||||
return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
|
||||
}
|
||||
@ -385,7 +385,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
|
||||
sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
|
||||
vsgt->num_pages, 0,
|
||||
(unsigned long) vsgt->num_pages << PAGE_SHIFT,
|
||||
dma_get_max_seg_size(dev_priv->dev->dev),
|
||||
dma_get_max_seg_size(dev_priv->drm.dev),
|
||||
NULL, 0, GFP_KERNEL);
|
||||
if (IS_ERR(sg)) {
|
||||
ret = PTR_ERR(sg);
|
||||
|
Loading…
Reference in New Issue
Block a user