commit bbf4ee2663
Merge tag 'drm-intel-fixes-2020-04-15' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Fix guest page access by using the brand new VFIO dma r/w interface (Yan)
- Fix for i915 perf read buffers (Ashutosh)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200415200349.GA2550694@intel.com
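Two unrelated fixes ride this merge. The first switches GVT-g's guest-memory access from KVM-based helpers to VFIO's external-user API. A minimal sketch of that lifecycle, assuming the 5.7-era VFIO API; the demo_rw_guest() helper and its arguments are illustrative, not taken from the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/vfio.h>

/* Illustrative helper (not in the patch): read or write a range of guest
 * physical memory through the device's VFIO group. GVT-g takes this
 * reference once at open time and caches it; doing it per call, as here,
 * only keeps the sketch self-contained. */
static int demo_rw_guest(struct device *dev, dma_addr_t gpa,
                         void *buf, size_t len, bool write)
{
        struct vfio_group *group;
        int ret;

        /* Take an external-user reference on the device's VFIO group. */
        group = vfio_group_get_external_user_from_dev(dev);
        if (IS_ERR_OR_NULL(group))
                return group ? PTR_ERR(group) : -EFAULT;

        /* Access guest memory by IOVA through the group's container. */
        ret = vfio_dma_rw(group, gpa, buf, len, write);

        vfio_group_put_external_user(group);
        return ret;
}

The hunks below do exactly this, split across the vgpu's open, access, and release paths.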
drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -131,6 +131,7 @@ struct kvmgt_vdev {
         struct work_struct release_work;
         atomic_t released;
         struct vfio_device *vfio_device;
+        struct vfio_group *vfio_group;
 };
 
 static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
@@ -151,6 +152,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                 unsigned long size)
 {
         struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+        struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
         int total_pages;
         int npage;
         int ret;
@@ -160,7 +162,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
         for (npage = 0; npage < total_pages; npage++) {
                 unsigned long cur_gfn = gfn + npage;
 
-                ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
+                ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
                 drm_WARN_ON(&i915->drm, ret != 1);
         }
 }
@@ -169,6 +171,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                 unsigned long size, struct page **page)
 {
+        struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
         unsigned long base_pfn = 0;
         int total_pages;
         int npage;
@@ -183,7 +186,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
                 unsigned long cur_gfn = gfn + npage;
                 unsigned long pfn;
 
-                ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
-                                     IOMMU_READ | IOMMU_WRITE, &pfn);
+                ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
+                                           IOMMU_READ | IOMMU_WRITE, &pfn);
                 if (ret != 1) {
                         gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
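With the group reference cached in kvmgt_vdev, pinning switches from the per-device vfio_pin_pages()/vfio_unpin_pages() to the group-based variants, saving a device-to-group lookup on every call. A sketch of the calling convention under the same 5.7-era API (demo_pin_one() is an illustrative name, not from the patch); both calls return the number of pages processed, which is why the hunks above test ret != 1:

#include <linux/iommu.h>
#include <linux/vfio.h>

/* Illustrative: pin a single guest page for DMA, then release it. */
static int demo_pin_one(struct vfio_group *group, unsigned long gfn)
{
        unsigned long pfn;
        int ret;

        ret = vfio_group_pin_pages(group, &gfn, 1,
                                   IOMMU_READ | IOMMU_WRITE, &pfn);
        if (ret != 1)        /* returns pages pinned, or -errno */
                return ret < 0 ? ret : -EFAULT;

        /* ... program the device to DMA to/from pfn ... */

        ret = vfio_group_unpin_pages(group, &gfn, 1);
        return ret == 1 ? 0 : (ret < 0 ? ret : -EFAULT);
}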
@@ -792,6 +795,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
         struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
         unsigned long events;
         int ret;
+        struct vfio_group *vfio_group;
 
         vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
         vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
@@ -814,6 +818,14 @@ static int intel_vgpu_open(struct mdev_device *mdev)
                 goto undo_iommu;
         }
 
+        vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
+        if (IS_ERR_OR_NULL(vfio_group)) {
+                ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
+                gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
+                goto undo_register;
+        }
+        vdev->vfio_group = vfio_group;
+
         /* Take a module reference as mdev core doesn't take
          * a reference for vendor driver.
          */
@@ -830,6 +842,10 @@ static int intel_vgpu_open(struct mdev_device *mdev)
         return ret;
 
 undo_group:
+        vfio_group_put_external_user(vdev->vfio_group);
+        vdev->vfio_group = NULL;
+
+undo_register:
         vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
                                  &vdev->group_notifier);
 
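Worth noting in the two intel_vgpu_open() hunks above: the error unwinding stays in reverse order of acquisition. A failure of the group lookup itself jumps straight to the new undo_register label and only unregisters the notifiers; a later failure goes to undo_group, which drops the group reference and then falls through into undo_register.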
@@ -884,6 +900,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
         kvmgt_guest_exit(info);
 
         intel_vgpu_release_msi_eventfd_ctx(vgpu);
+        vfio_group_put_external_user(vdev->vfio_group);
 
         vdev->kvm = NULL;
         vgpu->handle = 0;
@@ -2035,33 +2052,14 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
                 void *buf, unsigned long len, bool write)
 {
         struct kvmgt_guest_info *info;
-        struct kvm *kvm;
-        int idx, ret;
-        bool kthread = current->mm == NULL;
 
         if (!handle_valid(handle))
                 return -ESRCH;
 
         info = (struct kvmgt_guest_info *)handle;
-        kvm = info->kvm;
-
-        if (kthread) {
-                if (!mmget_not_zero(kvm->mm))
-                        return -EFAULT;
-                use_mm(kvm->mm);
-        }
-
-        idx = srcu_read_lock(&kvm->srcu);
-        ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
-                      kvm_read_guest(kvm, gpa, buf, len);
-        srcu_read_unlock(&kvm->srcu, idx);
-
-        if (kthread) {
-                unuse_mm(kvm->mm);
-                mmput(kvm->mm);
-        }
-
-        return ret;
+
+        return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
+                           gpa, buf, len, write);
 }
 
 static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
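This hunk is the heart of Yan's fix: kvmgt_rw_gpa() no longer calls kvm_read_guest()/kvm_write_guest() under SRCU, and the mmget_not_zero()/use_mm() juggling for kthread callers disappears with it. All of that is absorbed by vfio_dma_rw(), which resolves the access through the IOVA mappings VFIO already tracks for the group; this is what "using the brand new VFIO dma r/w interface" in the merge summary refers to.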
drivers/gpu/drm/i915/i915_perf.c

@@ -2940,49 +2940,6 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
         gen8_update_reg_state_unlocked(ce, stream);
 }
 
-/**
- * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
- * @stream: An i915 perf stream
- * @file: An i915 perf stream file
- * @buf: destination buffer given by userspace
- * @count: the number of bytes userspace wants to read
- * @ppos: (inout) file seek position (unused)
- *
- * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
- * ensure that if we've successfully copied any data then reporting that takes
- * precedence over any internal error status, so the data isn't lost.
- *
- * For example ret will be -ENOSPC whenever there is more buffered data than
- * can be copied to userspace, but that's only interesting if we weren't able
- * to copy some data because it implies the userspace buffer is too small to
- * receive a single record (and we never split records).
- *
- * Another case with ret == -EFAULT is more of a grey area since it would seem
- * like bad form for userspace to ask us to overrun its buffer, but the user
- * knows best:
- *
- *   http://yarchive.net/comp/linux/partial_reads_writes.html
- *
- * Returns: The number of bytes copied or a negative error code on failure.
- */
-static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
-                                     struct file *file,
-                                     char __user *buf,
-                                     size_t count,
-                                     loff_t *ppos)
-{
-        /* Note we keep the offset (aka bytes read) separate from any
-         * error status so that the final check for whether we return
-         * the bytes read with a higher precedence than any error (see
-         * comment below) doesn't need to be handled/duplicated in
-         * stream->ops->read() implementations.
-         */
-        size_t offset = 0;
-        int ret = stream->ops->read(stream, buf, count, &offset);
-
-        return offset ?: (ret ?: -EAGAIN);
-}
-
 /**
  * i915_perf_read - handles read() FOP for i915 perf stream FDs
  * @file: An i915 perf stream file
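The 43 lines deleted above remove i915_perf_read_locked() wholesale, kernel-doc included. Its "bytes copied take precedence over any error" normalisation is not lost; the following hunks fold it directly into i915_perf_read(), which now tracks the copy offset itself.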
@@ -3008,7 +2965,8 @@ static ssize_t i915_perf_read(struct file *file,
 {
         struct i915_perf_stream *stream = file->private_data;
         struct i915_perf *perf = stream->perf;
-        ssize_t ret;
+        size_t offset = 0;
+        int ret;
 
         /* To ensure it's handled consistently we simply treat all reads of a
          * disabled stream as an error. In particular it might otherwise lead
@@ -3031,13 +2989,12 @@ static ssize_t i915_perf_read(struct file *file,
                                 return ret;
 
                         mutex_lock(&perf->lock);
-                        ret = i915_perf_read_locked(stream, file,
-                                                    buf, count, ppos);
+                        ret = stream->ops->read(stream, buf, count, &offset);
                         mutex_unlock(&perf->lock);
-                } while (ret == -EAGAIN);
+                } while (!offset && !ret);
         } else {
                 mutex_lock(&perf->lock);
-                ret = i915_perf_read_locked(stream, file, buf, count, ppos);
+                ret = stream->ops->read(stream, buf, count, &offset);
                 mutex_unlock(&perf->lock);
         }
 
@@ -3048,15 +3005,15 @@ static ssize_t i915_perf_read(struct file *file,
          * and read() returning -EAGAIN. Clearing the oa.pollin state here
          * effectively ensures we back off until the next hrtimer callback
          * before reporting another EPOLLIN event.
+         * The exception to this is if ops->read() returned -ENOSPC which means
+         * that more OA data is available than could fit in the user provided
+         * buffer. In this case we want the next poll() call to not block.
          */
-        if (ret >= 0 || ret == -EAGAIN) {
-                /* Maybe make ->pollin per-stream state if we support multiple
-                 * concurrent streams in the future.
-                 */
+        if (ret != -ENOSPC)
                 stream->pollin = false;
-        }
 
-        return ret;
+        /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
+        return offset ?: (ret ?: -EAGAIN);
 }
 
 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
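Taken together, Ashutosh's change means a read into a buffer too small for the available OA data (-ENOSPC from ops->read()) no longer clears stream->pollin, so the next poll() reports readiness immediately instead of blocking until the hrtimer fires again. The blocking loop likewise now spins on `!offset && !ret` rather than on -EAGAIN, since ops->read() reports "nothing yet" as a zero ret with zero offset. The final return statement compresses the old helper's policy into one expression; an illustrative restatement (the helper name is ours, not the kernel's):

/* What `return offset ?: (ret ?: -EAGAIN);` encodes: bytes actually
 * copied always win, then any error code, else "try again". */
static ssize_t normalise_read_result(size_t offset, int ret)
{
        if (offset)        /* partial data beats any error, even -EFAULT */
                return offset;
        if (ret)           /* e.g. -EFAULT, -ENOSPC, -EIO */
                return ret;
        return -EAGAIN;    /* no data and no error: nothing available yet */
}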