Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next

- Disable DMA when using SEV encryption
- An -RT fix
- Code cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: "Thomas Hellstrom (VMware)" <thomas_os@shipmail.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20200316105212.26504-1-thomas_os@shipmail.org
commit 71fa42fac5

@@ -1241,7 +1241,8 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
 	 * actually call into the already enabled manager, when
 	 * binding the MOB.
 	 */
-	if (!(dev_priv->capabilities & SVGA_CAP_DX))
+	if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
+	    !dev_priv->has_mob)
 		return -ENOMEM;

 	ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,

@@ -29,6 +29,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/mem_encrypt.h>

 #include <drm/drm_drv.h>
 #include <drm/drm_ioctl.h>

@@ -575,6 +576,10 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_map_populate] = "Caching DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

+	/* TTM currently doesn't fully support SEV encryption. */
+	if (mem_encrypt_active())
+		return -EINVAL;
+
 	if (vmw_force_coherent)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
 	else if (vmw_restrict_iommu)

@@ -682,8 +687,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)

 	ret = vmw_dma_select_mode(dev_priv);
 	if (unlikely(ret != 0)) {
-		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+		DRM_INFO("Restricting capabilities since DMA not available.\n");
 		refuse_dma = true;
+		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+			DRM_INFO("Disabling 3D acceleration.\n");
 	}

 	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);

@@ -866,7 +873,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		dev_priv->has_gmr = false;
 	}

-	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
 		dev_priv->has_mob = true;
 		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
 				   VMW_PL_MOB) != 0) {

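Taken together, the hunks above implement the first item of the commit message: with SEV memory encryption active, vmw_dma_select_mode() now fails, and vmw_driver_load() treats that failure as "DMA not available", so MOBs and with them 3D acceleration are refused up front rather than failing later. The command-buffer hunk at the top and the screen-target hunk further down make those paths check dev_priv->has_mob and back off cleanly. Roughly, the resulting control flow is as follows; this is a condensed paraphrase of the hunks, not complete or compilable driver code, and error paths and unrelated setup are elided:

/* Condensed paraphrase of the SEV/DMA handling added in this merge. */

static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	/* TTM currently doesn't fully support SEV encryption. */
	if (mem_encrypt_active())
		return -EINVAL;

	/* ... otherwise choose coherent/map_populate/map_bind as before ... */
	return 0;
}

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;	/* allocated and initialized earlier, elided */
	bool refuse_dma = false;

	/* ... */
	if (vmw_dma_select_mode(dev_priv) != 0) {
		DRM_INFO("Restricting capabilities since DMA not available.\n");
		refuse_dma = true;
		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			DRM_INFO("Disabling 3D acceleration.\n");
	}

	/* MOBs need working DMA, so they are only enabled when DMA was not
	 * refused; screen targets and the command-buffer pool later check
	 * dev_priv->has_mob and bail out when it is false. */
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma)
		dev_priv->has_mob = true;

	/* ... */
	return 0;
}
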
@@ -479,19 +479,6 @@ struct vmw_private {
 	bool assume_16bpp;
 	bool has_sm4_1;

-	/*
-	 * VGA registers.
-	 */
-
-	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
-	uint32_t vga_width;
-	uint32_t vga_height;
-	uint32_t vga_bpp;
-	uint32_t vga_bpl;
-	uint32_t vga_pitchlock;
-
-	uint32_t num_displays;
-
 	/*
 	 * Framebuffer info.
 	 */

@@ -900,7 +887,6 @@ extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
 extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 			       uint32_t *seqno);
-extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);

@@ -947,7 +933,6 @@ extern struct ttm_placement vmw_mob_placement;
 extern struct ttm_placement vmw_mob_ne_placement;
 extern struct ttm_placement vmw_nonfixed_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
-extern int vmw_dma_quiescent(struct drm_device *dev);
 extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
 extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
 extern const struct vmw_sg_table *

@@ -1085,8 +1070,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv);

 int vmw_kms_init(struct vmw_private *dev_priv);
 int vmw_kms_close(struct vmw_private *dev_priv);
-int vmw_kms_save_vga(struct vmw_private *vmw_priv);
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);

@@ -1139,7 +1122,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv);
 int vmw_overlay_close(struct vmw_private *dev_priv);
 int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv);
-int vmw_overlay_stop_all(struct vmw_private *dev_priv);
 int vmw_overlay_resume_all(struct vmw_private *dev_priv);
 int vmw_overlay_pause_all(struct vmw_private *dev_priv);
 int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);

@@ -1186,10 +1168,6 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);

 extern const struct vmw_user_resource_conv *user_context_converter;

-extern int vmw_context_check(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     int id,
-			     struct vmw_resource **p_res);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
 extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,

@@ -1219,7 +1197,6 @@ vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);

 extern const struct vmw_user_resource_conv *user_surface_converter;

-extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,

@@ -1230,11 +1207,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 				       struct drm_file *file_priv);
 extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 					  struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
-				struct vmw_surface *srf);
 int vmw_surface_gb_priv_define(struct drm_device *dev,
 			       uint32_t user_accounting_size,
 			       SVGA3dSurfaceAllFlags svga3d_flags,

@@ -169,10 +169,8 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
 	u32 *fifo_mem = dev_priv->mmio_virt;

-	preempt_disable();
 	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
 		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
-	preempt_enable();
 }

 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)

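The hunk above is presumably the "-RT fix" from the commit message: vmw_write() ends up taking a regular spinlock, and on PREEMPT_RT such locks are sleeping locks that must not be acquired inside a preempt-disabled section; since the cmpxchg() on the FIFO busy word is atomic on its own, the preempt_disable()/preempt_enable() pair can simply be dropped. The general pattern, reduced to an illustrative stand-alone form (notify_device() is a hypothetical stand-in for vmw_write(), not a driver function):

/*
 * Set a shared "busy" flag exactly once and, only on the 0 -> 1
 * transition, notify the device.  cmpxchg() is atomic by itself, so no
 * preemption control is needed, and the notification path stays free
 * to take locks that may sleep on PREEMPT_RT.
 */
static void ping_host(u32 *busy_flag)
{
	if (cmpxchg(busy_flag, 0, 1) == 0)
		notify_device();	/* hypothetical stand-in for vmw_write() */
}
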
@@ -1897,87 +1897,6 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
 	return 0;
 }

-int vmw_kms_save_vga(struct vmw_private *vmw_priv)
-{
-	struct vmw_vga_topology_state *save;
-	uint32_t i;
-
-	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
-	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
-	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
-	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
-		vmw_priv->vga_pitchlock =
-			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
-	else if (vmw_fifo_have_pitchlock(vmw_priv))
-		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
-							SVGA_FIFO_PITCHLOCK);
-
-	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
-		return 0;
-
-	vmw_priv->num_displays = vmw_read(vmw_priv,
-					  SVGA_REG_NUM_GUEST_DISPLAYS);
-
-	if (vmw_priv->num_displays == 0)
-		vmw_priv->num_displays = 1;
-
-	for (i = 0; i < vmw_priv->num_displays; ++i) {
-		save = &vmw_priv->vga_save[i];
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
-		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
-		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
-		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
-		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
-		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-		if (i == 0 && vmw_priv->num_displays == 1 &&
-		    save->width == 0 && save->height == 0) {
-
-			/*
-			 * It should be fairly safe to assume that these
-			 * values are uninitialized.
-			 */
-
-			save->width = vmw_priv->vga_width - save->pos_x;
-			save->height = vmw_priv->vga_height - save->pos_y;
-		}
-	}
-
-	return 0;
-}
-
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
-{
-	struct vmw_vga_topology_state *save;
-	uint32_t i;
-
-	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
-	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
-	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
-	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
-		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
-			  vmw_priv->vga_pitchlock);
-	else if (vmw_fifo_have_pitchlock(vmw_priv))
-		vmw_mmio_write(vmw_priv->vga_pitchlock,
-			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
-
-	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
-		return 0;
-
-	for (i = 0; i < vmw_priv->num_displays; ++i) {
-		save = &vmw_priv->vga_save[i];
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-	}
-
-	return 0;
-}
-
 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
 				uint32_t pitch,
 				uint32_t height)

@@ -353,37 +353,6 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
 	return 0;
 }

-/**
- * Stop all streams.
- *
- * Used by the fb code when starting.
- *
- * Takes the overlay lock.
- */
-int vmw_overlay_stop_all(struct vmw_private *dev_priv)
-{
-	struct vmw_overlay *overlay = dev_priv->overlay_priv;
-	int i, ret;
-
-	if (!overlay)
-		return 0;
-
-	mutex_lock(&overlay->mutex);
-
-	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
-		struct vmw_stream *stream = &overlay->stream[i];
-		if (!stream->buf)
-			continue;
-
-		ret = vmw_overlay_stop(dev_priv, i, false, false);
-		WARN_ON(ret != 0);
-	}
-
-	mutex_unlock(&overlay->mutex);
-
-	return 0;
-}
-
 /**
  * Try to resume all paused streams.
  *

|
||||
unsigned int ref_count;
|
||||
unsigned long bitmap_size;
|
||||
size_t size;
|
||||
unsigned long bitmap[0];
|
||||
unsigned long bitmap[];
|
||||
};
|
||||
|
||||
/**
|
||||
|
@@ -1888,7 +1888,7 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)


 	/* Do nothing if Screen Target support is turned off */
-	if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+	if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE || !dev_priv->has_mob)
 		return -ENOSYS;

 	if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))

@@ -79,7 +79,7 @@ struct vmw_surface_dirty {
 	struct svga3dsurface_cache cache;
 	size_t size;
 	u32 num_subres;
-	SVGA3dBox boxes[0];
+	SVGA3dBox boxes[];
 };

 static void vmw_user_surface_free(struct vmw_resource *res);

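The bitmap[0] and boxes[0] hunks are part of the "code cleanups": zero-length trailing arrays (a GNU extension) are converted to C99 flexible array members, a conversion the kernel has been doing tree-wide so that the compiler and fortify checks can see the array's true nature. A generic sketch of how such a struct is typically allocated follows; the struct and function names here are illustrative, not vmwgfx code:

#include <linux/bitops.h>	/* BITS_TO_LONGS() */
#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>

struct dirty_tracker {
	size_t nbits;
	unsigned long bitmap[];		/* flexible array member, was bitmap[0] */
};

static struct dirty_tracker *dirty_tracker_alloc(size_t nbits)
{
	struct dirty_tracker *t;

	/* struct_size() sizes the header plus the trailing array,
	 * with overflow checking. */
	t = kzalloc(struct_size(t, bitmap, BITS_TO_LONGS(nbits)), GFP_KERNEL);
	if (t)
		t->nbits = nbits;
	return t;
}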