linux/drivers/gpu/drm/i915/i915_vgpu.c
Jani Nikula 9e859eb9d0 drm/i915/vgpu: improve vgpu abstractions
Add intel_vgpu_register() abstraction, rename i915_detect_vgpu() to
intel_vgpu_detect() to match other function naming, un-inline
intel_vgpu_active(), intel_vgpu_has_full_ppgtt() and
intel_vgpu_has_huge_gtt() to reduce header interdependencies.

The i915_vgpu.[ch] filename and intel_vgpu_ prefix discrepancy remains.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200227144408.24345-1-jani.nikula@intel.com
2020-03-03 17:46:54 +02:00


/*
* Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
/**
* DOC: Intel GVT-g guest support
*
 * Intel GVT-g is a graphics virtualization technology which shares the
 * GPU among multiple virtual machines on a time-sharing basis. Each
 * virtual machine is presented a virtual GPU (vGPU) with equivalent
 * features to the underlying physical GPU (pGPU), so the i915 driver can
 * run seamlessly in a virtual machine. This file provides vGPU specific
 * optimizations when running in a virtual machine, to reduce the complexity
 * of vGPU emulation and to improve the overall performance.
*
 * A primary feature introduced here is the so-called "address space
 * ballooning" technique. Intel GVT-g partitions global graphics memory among
 * multiple VMs, so each VM can directly access a portion of the memory
 * without the hypervisor's intervention, e.g. filling textures or queuing
 * commands. However, with this partitioning an unmodified i915 driver would
 * assume a smaller graphics memory starting from address ZERO, and would then
 * require the vGPU emulation module to translate graphics addresses between
 * 'guest view' and 'host view' for all registers and command opcodes which
 * contain a graphics memory address. To reduce the complexity, Intel GVT-g
 * introduces "address space ballooning", by conveying the exact partitioning
 * layout to each guest i915 driver, which then reserves the non-allocated
 * portions and prevents them from being allocated. Thus the vGPU emulation
 * module only needs to scan and validate graphics addresses, without the
 * complexity of address translation.
*
*/
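/*
 * Worked example (illustrative sketch, not part of the driver, with made-up
 * numbers): suppose the host assigns this vGPU the mappable range
 * [256 MiB, 512 MiB) and the unmappable range [2 GiB, 3 GiB) of a 4 GiB GGTT
 * whose mappable part ends at 1 GiB. Ballooning then reserves the four
 * complementary ranges so the guest allocator never hands out addresses that
 * belong to other VMs (the range-to-index mapping matches bl_info below):
 */
#if 0
static void example_balloon_ranges(void)
{
	/* Hypothetical partitioning as read from the PVINFO page. */
	const u64 mappable_base = SZ_256M, mappable_end = SZ_512M;
	const u64 unmappable_base = SZ_2G, unmappable_end = 3ull * SZ_1G;
	const u64 ggtt_mappable_end = SZ_1G, ggtt_total = SZ_4G;

	/* Ranges the guest balloons out (reserves) in its GGTT:
	 *   bl_info.space[0]: [0, mappable_base)                   256 MiB
	 *   bl_info.space[1]: [mappable_end, ggtt_mappable_end)     512 MiB
	 *   bl_info.space[2]: [ggtt_mappable_end, unmappable_base)    1 GiB
	 *   bl_info.space[3]: [unmappable_end, ggtt_total)            1 GiB
	 */
}
#endif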
/**
* intel_vgpu_detect - detect virtual GPU
* @dev_priv: i915 device private
*
 * This function is called at the initialization stage, to detect whether the
 * driver is running on a vGPU.
*/
void intel_vgpu_detect(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u64 magic;
u16 version_major;
void __iomem *shared_area;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
/*
* This is called before we setup the main MMIO BAR mappings used via
* the uncore structure, so we need to access the BAR directly. Since
* we do not support VGT on older gens, return early so we don't have
* to consider differently numbered or sized MMIO bars
*/
if (INTEL_GEN(dev_priv) < 6)
return;
shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
if (!shared_area) {
drm_err(&dev_priv->drm,
"failed to map MMIO bar to check for VGT\n");
return;
}
magic = readq(shared_area + vgtif_offset(magic));
if (magic != VGT_MAGIC)
goto out;
version_major = readw(shared_area + vgtif_offset(version_major));
if (version_major < VGT_VERSION_MAJOR) {
drm_info(&dev_priv->drm, "VGT interface version mismatch!\n");
goto out;
}
dev_priv->vgpu.caps = readl(shared_area + vgtif_offset(vgt_caps));
dev_priv->vgpu.active = true;
mutex_init(&dev_priv->vgpu.lock);
drm_info(&dev_priv->drm, "Virtual GPU for Intel GVT-g detected.\n");
out:
pci_iounmap(pdev, shared_area);
}
void intel_vgpu_register(struct drm_i915_private *i915)
{
/*
* Notify a valid surface after modesetting, when running inside a VM.
*/
if (intel_vgpu_active(i915))
intel_uncore_write(&i915->uncore, vgtif_reg(display_ready),
VGT_DRV_DISPLAY_READY);
}
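/*
 * Illustrative sketch (not part of the driver, hypothetical probe flow):
 * intel_vgpu_detect() must run before the regular MMIO mappings are set up,
 * since it maps the PVINFO page straight from the BAR. Once modesetting has
 * been initialized, intel_vgpu_register() notifies the host that the guest
 * can display, and later code gates vGPU-only paths on intel_vgpu_active().
 */
#if 0
static int example_probe_flow(struct drm_i915_private *i915)
{
	intel_vgpu_detect(i915);	/* before uncore/MMIO setup */

	/* ... MMIO setup, GGTT init (including intel_vgt_balloon()),
	 * modesetting init ...
	 */

	intel_vgpu_register(i915);	/* host may now expect a valid surface */

	if (intel_vgpu_active(i915))
		drm_dbg(&i915->drm, "running as a GVT-g guest\n");

	return 0;
}
#endif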
bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.active;
}
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
}
bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
}
bool intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
}
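/*
 * Illustrative sketch (not part of the driver, hypothetical caller): the
 * capability helpers above are meant to gate optional vGPU features, so a
 * feature is only used when the host GVT-g device model advertises it in
 * the PVINFO caps field.
 */
#if 0
static bool example_can_use_huge_gtt(struct drm_i915_private *i915)
{
	/* Native hardware: no restriction from virtualization. */
	if (!intel_vgpu_active(i915))
		return true;

	/* vGPU: only if the host advertises VGT_CAPS_HUGE_GTT. */
	return intel_vgpu_has_huge_gtt(i915);
}
#endif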
struct _balloon_info_ {
/*
 * There are up to 2 regions per mappable/unmappable graphics
 * memory that might be ballooned. Here, indices 0/1 are for mappable
 * graphics memory, 2/3 for unmappable graphics memory.
*/
struct drm_mm_node space[4];
};
static struct _balloon_info_ bl_info;
static void vgt_deballoon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node)
{
struct drm_i915_private *dev_priv = ggtt->vm.i915;
if (!drm_mm_node_allocated(node))
return;
drm_dbg(&dev_priv->drm,
"deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
node->start,
node->start + node->size,
node->size / 1024);
ggtt->vm.reserved -= node->size;
drm_mm_remove_node(node);
}
/**
 * intel_vgt_deballoon - deballoon reserved graphics address chunks
* @ggtt: the global GGTT from which we reserved earlier
*
 * This function is called to deallocate the ballooned-out graphics memory,
 * when the driver is unloaded or when ballooning fails.
*/
void intel_vgt_deballoon(struct i915_ggtt *ggtt)
{
struct drm_i915_private *dev_priv = ggtt->vm.i915;
int i;
if (!intel_vgpu_active(ggtt->vm.i915))
return;
drm_dbg(&dev_priv->drm, "VGT deballoon.\n");
for (i = 0; i < 4; i++)
vgt_deballoon_space(ggtt, &bl_info.space[i]);
}
static int vgt_balloon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node,
unsigned long start, unsigned long end)
{
struct drm_i915_private *dev_priv = ggtt->vm.i915;
unsigned long size = end - start;
int ret;
if (start >= end)
return -EINVAL;
drm_info(&dev_priv->drm,
"balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
ret = i915_gem_gtt_reserve(&ggtt->vm, node,
size, start, I915_COLOR_UNEVICTABLE,
0);
if (!ret)
ggtt->vm.reserved += size;
return ret;
}
/**
 * intel_vgt_balloon - balloon out reserved graphics address chunks
* @ggtt: the global GGTT from which to reserve
*
 * This function is called at the initialization stage, to balloon out the
 * graphics address space allocated to other vGPUs, by marking these spaces as
 * reserved. The ballooning-related knowledge (starting address and size of
 * the mappable/unmappable graphics memory) is described in the vgt_if
 * structure in a reserved MMIO range.
*
 * To give an example, the drawing below depicts one typical scenario after
 * ballooning. Here vGPU1 has two pieces of graphics address space ballooned
 * out in each of the mappable and the unmappable parts. From the vGPU1 point
 * of view, the total size is the same as the physical one, with the start
 * address of its graphics space being zero. Yet there are some portions
 * ballooned out (the shaded parts, which are marked as reserved by the drm
 * allocator). From the host point of view, the graphics address space is
 * partitioned by multiple vGPUs in different VMs. ::
*
 *   vGPU1 view                        Host view
 *        0 ------> +-----------+     +-----------+
 *        ^         |###########|     |   vGPU3   |
 *        |         |###########|     +-----------+
 *        |         |###########|     |   vGPU2   |
 *        |         +-----------+     +-----------+
 *   mappable GM    | available | ==> |   vGPU1   |
 *        |         +-----------+     +-----------+
 *        |         |###########|     |           |
 *        v         |###########|     |   Host    |
 *        +=======+===========+       +===========+
 *        ^         |###########|     |   vGPU3   |
 *        |         |###########|     +-----------+
 *        |         |###########|     |   vGPU2   |
 *        |         +-----------+     +-----------+
 *  unmappable GM   | available | ==> |   vGPU1   |
 *        |         +-----------+     +-----------+
 *        |         |###########|     |           |
 *        |         |###########|     |   Host    |
 *        v         |###########|     |           |
 *  total GM size ------> +-----------+     +-----------+
*
* Returns:
 * zero on success, non-zero if the configuration is invalid or ballooning failed
*/
int intel_vgt_balloon(struct i915_ggtt *ggtt)
{
struct drm_i915_private *dev_priv = ggtt->vm.i915;
struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
int ret;
if (!intel_vgpu_active(ggtt->vm.i915))
return 0;
mappable_base =
intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.base));
mappable_size =
intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.size));
unmappable_base =
intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.base));
unmappable_size =
intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.size));
mappable_end = mappable_base + mappable_size;
unmappable_end = unmappable_base + unmappable_size;
drm_info(&dev_priv->drm, "VGT ballooning configuration:\n");
drm_info(&dev_priv->drm,
"Mappable graphic memory: base 0x%lx size %ldKiB\n",
mappable_base, mappable_size / 1024);
drm_info(&dev_priv->drm,
"Unmappable graphic memory: base 0x%lx size %ldKiB\n",
unmappable_base, unmappable_size / 1024);
if (mappable_end > ggtt->mappable_end ||
unmappable_base < ggtt->mappable_end ||
unmappable_end > ggtt_end) {
drm_err(&dev_priv->drm, "Invalid ballooning configuration!\n");
return -EINVAL;
}
/* Unmappable graphic memory ballooning */
if (unmappable_base > ggtt->mappable_end) {
ret = vgt_balloon_space(ggtt, &bl_info.space[2],
ggtt->mappable_end, unmappable_base);
if (ret)
goto err;
}
if (unmappable_end < ggtt_end) {
ret = vgt_balloon_space(ggtt, &bl_info.space[3],
unmappable_end, ggtt_end);
if (ret)
goto err_upon_mappable;
}
/* Mappable graphic memory ballooning */
if (mappable_base) {
ret = vgt_balloon_space(ggtt, &bl_info.space[0],
0, mappable_base);
if (ret)
goto err_upon_unmappable;
}
if (mappable_end < ggtt->mappable_end) {
ret = vgt_balloon_space(ggtt, &bl_info.space[1],
mappable_end, ggtt->mappable_end);
if (ret)
goto err_below_mappable;
}
drm_info(&dev_priv->drm, "VGT balloon successfully\n");
return 0;
err_below_mappable:
vgt_deballoon_space(ggtt, &bl_info.space[0]);
err_upon_unmappable:
vgt_deballoon_space(ggtt, &bl_info.space[3]);
err_upon_mappable:
vgt_deballoon_space(ggtt, &bl_info.space[2]);
err:
	drm_err(&dev_priv->drm, "VGT ballooning failed\n");
return ret;
}
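/*
 * Illustrative sketch (not part of the driver, hypothetical GGTT init path):
 * intel_vgt_balloon() is expected to run once the GGTT address space has been
 * set up, and intel_vgt_deballoon() undoes the reservations on failure or
 * when the GGTT is torn down. Both are no-ops when not running on a vGPU.
 */
#if 0
static int example_ggtt_init(struct i915_ggtt *ggtt)
{
	int ret;

	ret = intel_vgt_balloon(ggtt);
	if (ret)
		return ret;

	/* ... rest of GGTT initialization; on a later error, unwind with
	 * intel_vgt_deballoon(ggtt); ...
	 */

	return 0;
}
#endif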