// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_pci.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_object.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
                 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
                struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
                struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
                struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
                struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
                struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
                struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
                struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
                struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
                struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
                struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
                struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
                 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
                struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
                 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
                 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
                struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
                 struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
                 union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
                 union drm_vmw_gb_surface_reference_ext_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */
#define VMW_IOCTL_DEF(ioctl, func, flags) \
        [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),

        /* these allow direct access to the framebuffers; mark as master only */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH),
        /*
         * The permissions of the below ioctl are overridden in
         * vmw_generic_ioctl(). We require either
         * DRM_MASTER or capable(CAP_SYS_ADMIN).
         */
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SHADER,
                      vmw_shader_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SHADER,
                      vmw_shader_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
                      vmw_gb_surface_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
                      vmw_gb_surface_reference_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_SYNCCPU,
                      vmw_user_bo_synccpu_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
                      vmw_extended_context_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
                      vmw_gb_surface_define_ext_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
                      vmw_gb_surface_reference_ext_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);

static void vmw_print_capabilities2(uint32_t capabilities2)
{
        DRM_INFO("Capabilities2:\n");
        if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
                DRM_INFO("  Grow oTable.\n");
        if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
                DRM_INFO("  IntraSurface copy.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
        if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
                DRM_INFO("  Command Buffers.\n");
        if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
                DRM_INFO("  Command Buffers 2.\n");
        if (capabilities & SVGA_CAP_GBOBJECTS)
                DRM_INFO("  Guest Backed Resources.\n");
        if (capabilities & SVGA_CAP_DX)
                DRM_INFO("  DX Features.\n");
        if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
                DRM_INFO("  HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        int ret;
        struct vmw_buffer_object *vbo;
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;

        /*
         * Create the vbo as pinned, so that a tryreserve will
         * immediately succeed. This is because we're the only
         * user of the bo currently.
         */
        vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
        if (!vbo)
                return -ENOMEM;

        ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
                          &vmw_sys_ne_placement, false,
                          &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
        BUG_ON(ret != 0);
        vmw_bo_pin_reserved(vbo, true);

        ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        }
        vmw_bo_pin_reserved(vbo, false);
        ttm_bo_unreserve(&vbo->base);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Dummy query buffer map failed.\n");
                vmw_bo_unreference(&vbo);
        } else
                dev_priv->dummy_query_bo = vbo;

        return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
        int ret;

        if (dev_priv->has_mob) {
                ret = vmw_otables_setup(dev_priv);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to initialize "
                                  "guest Memory OBjects.\n");
                        return ret;
                }
        }

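        /*
         * Assumed semantics (comment not in the original source): the two
         * size arguments below are the command-buffer pool size and the
         * default command-buffer size, in bytes (a 1 MiB pool with 8 KiB
         * buffers).
         */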
        if (dev_priv->cman) {
                ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
                                               256*4096, 2*4096);
                if (ret) {
                        struct vmw_cmdbuf_man *man = dev_priv->cman;

                        dev_priv->cman = NULL;
                        vmw_cmdbuf_man_destroy(man);
                }
        }

        return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
        if (IS_ERR(dev_priv->cman)) {
                dev_priv->cman = NULL;
                dev_priv->has_dx = false;
        }

        ret = vmw_request_device_late(dev_priv);
        if (ret)
                goto out_no_mob;

        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;

        return 0;

out_no_query_bo:
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);
        if (dev_priv->has_mob) {
                (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
                vmw_otables_takedown(dev_priv);
        }
        if (dev_priv->cman)
                vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        vmw_bo_unreference(&dev_priv->dummy_query_bo);
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);

        if (dev_priv->has_mob) {
                ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
                vmw_otables_takedown(dev_priv);
        }
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
        vmw_fence_fifo_down(dev_priv->fman);
        if (dev_priv->cman)
                vmw_cmdbuf_man_destroy(dev_priv->cman);

        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
        uint32_t width;
        uint32_t height;

        width = vmw_read(dev_priv, SVGA_REG_WIDTH);
        height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

        width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
        height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

        if (width > dev_priv->fb_max_width ||
            height > dev_priv->fb_max_height) {

                /*
                 * This is a host error and shouldn't occur.
                 */

                width = VMW_MIN_INITIAL_WIDTH;
                height = VMW_MIN_INITIAL_HEIGHT;
        }

        dev_priv->initial_width = width;
        dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
        static const char *names[vmw_dma_map_max] = {
                [vmw_dma_phys] = "Using physical TTM page addresses.",
                [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
                [vmw_dma_map_populate] = "Caching DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};

        if (vmw_force_coherent)
                dev_priv->map_mode = vmw_dma_alloc_coherent;
        else if (vmw_restrict_iommu)
                dev_priv->map_mode = vmw_dma_map_bind;
        else
                dev_priv->map_mode = vmw_dma_map_populate;

        /* No TTM coherent page pool? FIXME: Ask TTM instead! */
        if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
            (dev_priv->map_mode == vmw_dma_alloc_coherent))
                return -EINVAL;

        DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
        return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int ret = 0;

        ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
        if (dev_priv->map_mode != vmw_dma_phys &&
            (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
                DRM_INFO("Restricting DMA addresses to 44 bits.\n");
                return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
        }

        return ret;
}

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;
        enum vmw_res_type i;
        bool refuse_dma = false;
        char host_log[100] = {0};

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(!dev_priv)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }

        pci_set_master(dev->pdev);

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
        mutex_init(&dev_priv->global_kms_state_mutex);
        ttm_lock_init(&dev_priv->reservation_sem);
        spin_lock_init(&dev_priv->resource_lock);
        spin_lock_init(&dev_priv->hw_lock);
        spin_lock_init(&dev_priv->waiter_lock);
        spin_lock_init(&dev_priv->cap_lock);
        spin_lock_init(&dev_priv->svga_lock);
        spin_lock_init(&dev_priv->cursor_lock);

        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
                INIT_LIST_HEAD(&dev_priv->res_lru[i]);
        }

        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        dev_priv->fifo_queue_waiters = 0;

        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->assume_16bpp = !!vmw_assume_16bpp;

        dev_priv->enable_fb = enable_fbdev;

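        /*
         * Device version negotiation: request SVGA_ID_2 and verify that the
         * device accepts it.
         */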
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
                dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
        }

        ret = vmw_dma_select_mode(dev_priv);
        if (unlikely(ret != 0)) {
                DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
                refuse_dma = true;
        }

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

        vmw_get_initial_size(dev_priv);

        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }
        dev_priv->max_mob_pages = 0;
        dev_priv->max_mob_size = 0;
        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                uint64_t mem_size =
                        vmw_read(dev_priv,
                                 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

                /*
                 * Workaround for low memory 2D VMs to compensate for the
                 * allocation taken by fbdev. The register value is a
                 * performance suggestion rather than a hard limit, so for
                 * 2D VMs we allow ourselves to exceed it.
                 */
                if (!(dev_priv->capabilities & SVGA_CAP_3D))
                        mem_size *= 3;

                dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
                dev_priv->prim_bb_mem =
                        vmw_read(dev_priv,
                                 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
                dev_priv->max_mob_size =
                        vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
                dev_priv->stdu_max_width =
                        vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
                dev_priv->stdu_max_height =
                        vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

                vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                          SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
                dev_priv->texture_max_width = vmw_read(dev_priv,
                                                       SVGA_REG_DEV_CAP);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                          SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
                dev_priv->texture_max_height = vmw_read(dev_priv,
                                                        SVGA_REG_DEV_CAP);
        } else {
                dev_priv->texture_max_width = 8192;
                dev_priv->texture_max_height = 8192;
                dev_priv->prim_bb_mem = dev_priv->vram_size;
        }

        vmw_print_capabilities(dev_priv->capabilities);
        if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
                vmw_print_capabilities2(dev_priv->capabilities2);

        ret = vmw_dma_masks(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;

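        /*
         * Cap the DMA segment size at the largest page-aligned value that
         * fits in 32 bits, or at the scatterlist maximum, whichever is
         * smaller.
         */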
        dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
                                             SCATTERLIST_MAX_SEGMENT));

        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("Maximum display memory size is %u kiB\n",
                 dev_priv->prim_bb_mem / 1024);
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
                                       dev_priv->mmio_size, MEMREMAP_WB);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err0;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
                                                &vmw_prime_dmabuf_ops);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /**
                 * Request at least the mmio PCI resource.
                 */

                DRM_INFO("It appears like vesafb is loaded. "
                         "Ignore above error if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = vmw_irq_install(dev, dev->pdev->irq);
                if (ret != 0) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL)) {
                ret = -ENOMEM;
                goto out_no_fman;
        }

        drm_vma_offset_manager_init(&dev_priv->vma_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);
        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 &vmw_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 &dev_priv->vma_manager,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_no_bdev;
        }

        /*
         * Enable VRAM, but initially don't use it until SVGA is enabled and
         * unhidden.
         */
        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_no_vram;
        }
        dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

        dev_priv->has_gmr = true;
        if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
            refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                                         VMW_PL_GMR) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                dev_priv->has_mob = true;
                if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
                                   VMW_PL_MOB) != 0) {
                        DRM_INFO("No MOB memory available. "
                                 "3D will be disabled.\n");
                        dev_priv->has_mob = false;
                }
        }

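        /*
         * Probe the device capabilities: writing a devcap index to
         * SVGA_REG_DEV_CAP and reading the register back yields that
         * capability's value.
         */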
        if (dev_priv->has_mob) {
                spin_lock(&dev_priv->cap_lock);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
                dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                spin_unlock(&dev_priv->cap_lock);
        }

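        /*
         * Assumed semantics of the helper below: register TTM memory
         * accounting for validation buffers, with
         * VMWGFX_VALIDATION_MEM_GRAN (16 pages) as the allocation
         * granularity.
         */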
        vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);

        ret = vmw_request_device(dev_priv);
        if (ret)
                goto out_no_fifo;

        if (dev_priv->has_dx) {
                /*
                 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
                 * support
                 */
                if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
                        vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                                  SVGA3D_DEVCAP_SM41);
                        dev_priv->has_sm4_1 = vmw_read(dev_priv,
                                                       SVGA_REG_DEV_CAP);
                }
        }

        DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
        DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
                 ? "yes." : "no.");
        DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");

        snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
                 VMWGFX_REPO, VMWGFX_GIT_VERSION);
        vmw_host_log(host_log);

        memset(host_log, 0, sizeof(host_log));
        snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
                 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
                 VMWGFX_DRIVER_PATCHLEVEL);
        vmw_host_log(host_log);

        if (dev_priv->enable_fb) {
                vmw_fifo_resource_inc(dev_priv);
                vmw_svga_enable(dev_priv);
                vmw_fb_init(dev_priv);
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_fifo:
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        if (dev_priv->has_mob)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
        vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        memunmap(dev_priv->mmio_virt);
out_err0:
        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);

        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        kfree(dev_priv);
        return ret;
}

static void vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        enum vmw_res_type i;

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.res_ht_initialized)
                drm_ht_remove(&dev_priv->ctx.res_ht);
        vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->enable_fb) {
                vmw_fb_off(dev_priv);
                vmw_fb_close(dev_priv);
                vmw_fifo_resource_dec(dev_priv);
                vmw_svga_disable(dev_priv);
        }

        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);

        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

        vmw_release_device_early(dev_priv);
        if (dev_priv->has_mob)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        (void) ttm_bo_device_release(&dev_priv->bdev);
        drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
        vmw_release_device_late(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                vmw_irq_uninstall(dev_priv->dev);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        memunmap(dev_priv->mmio_virt);
        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);

        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);

        kfree(dev_priv);
}

static void vmw_postclose(struct drm_device *dev,
                          struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

        ttm_object_file_release(&vmw_fp->tfile);
        kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(!vmw_fp))
                return ret;

        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg,
                              long (*ioctl_func)(struct file *, unsigned int,
                                                 unsigned long))
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        unsigned int flags;

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                const struct drm_ioctl_desc *ioctl =
                        &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
                        return ioctl_func(filp, cmd, arg);
                } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
                        if (!drm_is_current_master(file_priv) &&
                            !capable(CAP_SYS_ADMIN))
                                return -EACCES;
                }

                if (unlikely(ioctl->cmd != cmd))
                        goto out_io_encoding;

                flags = ioctl->flags;
        } else if (!drm_ioctl_flags(nr, &flags))
                return -EINVAL;

        return ioctl_func(filp, cmd, arg);

out_io_encoding:
        DRM_ERROR("Invalid command format, ioctl %d\n",
                  nr - DRM_COMMAND_BASE);

        return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
                             unsigned long arg)
{
        return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        /*
         * Inform a new master that the layout may have changed while
         * it was gone.
         */
        if (!from_open)
                drm_sysfs_hotplug_event(dev);

        return 0;
}

static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        vmw_kms_legacy_hotspot_clear(dev_priv);
        if (!dev_priv->enable_fb)
                vmw_svga_disable(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->svga_lock);
        if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
                vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
                dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
        }
        spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
        (void) ttm_read_lock(&dev_priv->reservation_sem, false);
        __vmw_svga_enable(dev_priv);
        ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->svga_lock);
        if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
                dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
vmw_write(dev_priv, SVGA_REG_ENABLE,
|
2015-07-09 04:20:39 +00:00
|
|
|
SVGA_REG_ENABLE_HIDE |
|
|
|
|
SVGA_REG_ENABLE_ENABLE);
|
2015-06-25 17:47:43 +00:00
|
|
|
}
|
|
|
|
spin_unlock(&dev_priv->svga_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 */
	vmw_kms_lost_device(dev_priv->dev);
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

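/*
 * vmw_remove - PCI remove callback; disables the PCI device and
 * unregisters the DRM device.
 */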
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

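/*
 * vmwgfx_pm_notifier - PM notifier callback.
 *
 * Holds the reservation sem across the hibernation sequence: taken in
 * write mode on PM_HIBERNATION_PREPARE and released again on
 * PM_POST_HIBERNATION / PM_POST_RESTORE, if still held.
 */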
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
			ttm_suspend_unlock(&dev_priv->reservation_sem);
		}
		break;
	default:
		break;
	}
	return 0;
}

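/*
 * vmw_pci_suspend - Legacy PCI suspend helper.
 *
 * Refuses to suspend while dev_priv->refuse_hibernation is set;
 * otherwise saves PCI state, disables the device and enters D3hot.
 */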
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

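/*
 * vmw_pci_resume - Legacy PCI resume helper; restores D0 power state
 * and PCI configuration, then re-enables the device.
 */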
static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

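/*
 * vmw_pm_suspend / vmw_pm_resume - dev_pm_ops wrappers around the
 * legacy PCI suspend and resume helpers above.
 */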
static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

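/*
 * vmw_pm_freeze - Prepare the device for hibernation.
 *
 * Suspends KMS and fbdev, releases pinned buffer objects, evicts
 * resources, swaps out TTM buffers and disables SVGA. Rolls everything
 * back and returns -EBUSY if 3D resources are still active.
 */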
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(dev_priv->dev);
	if (ret) {
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	ttm_bo_swapout_all(&dev_priv->bdev);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

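/*
 * vmw_pm_restore - Counterpart of vmw_pm_freeze; also used as the thaw
 * handler.
 *
 * Rewrites the SVGA register ID, requests device resources back,
 * re-enables SVGA when fbdev is active, and resumes KMS and fbdev.
 */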
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(dev_priv->dev);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

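/*
 * Hibernation uses the freeze/thaw/restore callbacks, with thaw and
 * restore sharing vmw_pm_restore(); suspend/resume cover ordinary
 * suspend-to-RAM via the legacy PCI helpers.
 */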
static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features =
	    DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

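/*
 * vmw_probe - PCI probe callback; hands the newly found device over to
 * the DRM core.
 */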
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

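/*
 * vmwgfx_init - Module init; bails out early if the VGA console has
 * been forced on the kernel command line, otherwise registers the PCI
 * driver.
 */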
static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");