drm/i915: Attempt to prefault user pages for pread/pwrite

... in the hope that it makes the atomic fast paths more likely to succeed.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
commit b5e4feb661
parent 202f2fef7a
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2010-10-14 13:47:43 +01:00
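The pattern being introduced, as a minimal standalone sketch (the function and its name below are illustrative, not part of this patch; it assumes the 2.6.36-era kmap_atomic()/KM_USER0 API used in this file): fault the user pages in while sleeping is still legal, then perform the non-faulting copy under the atomic mapping.

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>

/*
 * Illustrative only -- not part of this patch.  Prefault the user
 * buffer before entering atomic context; fault_in_pages_readable()
 * may fault and sleep, so it must run before kmap_atomic().
 */
static int prefaulted_shmem_write(struct page *page, int page_offset,
				  const char __user *data, int length)
{
	char *vaddr;
	int unwritten;

	if (fault_in_pages_readable(data, length))
		return -EFAULT;

	vaddr = kmap_atomic(page, KM_USER0);
	/* Cannot fault here; returns the number of bytes not copied. */
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	return unwritten ? -EFAULT : 0;
}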


@@ -265,19 +265,14 @@ fast_shmem_read(struct page **pages,
 		char __user *data,
 		int length)
 {
-	char __iomem *vaddr;
 	int unwritten;
+	char *vaddr;
 
 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
-	if (vaddr == NULL)
-		return -ENOMEM;
 	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
 	kunmap_atomic(vaddr, KM_USER0);
 
-	if (unwritten)
-		return -EFAULT;
-
-	return 0;
+	return unwritten ? -EFAULT : 0;
 }
 
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
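Aside from the prefaulting, the hunk above removes dead code: kmap_atomic() maps through fixed per-CPU slots and cannot fail, so the NULL check never fired. The mapping also becomes a plain char *, since __iomem was a misannotation for a kmap of ordinary shmem pages.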
@@ -602,6 +597,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+				       args->size);
+	if (ret) {
+		ret = -EFAULT;
+		goto out;
+	}
+
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
 		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
 	} else {
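What the prefault actually does: in this era fault_in_pages_writeable() (from <linux/pagemap.h>) pokes the first and last byte of the range, roughly as paraphrased below (a sketch, not the verbatim kernel source), so only the boundary pages of a multi-page buffer are guaranteed to be populated.

#include <linux/mm.h>
#include <linux/uaccess.h>

/*
 * Paraphrase of the era's fault_in_pages_writeable() -- a sketch, not
 * the verbatim header.  Writing a zero byte into userspace is harmless
 * here because the caller is about to overwrite the buffer anyway.
 */
static inline int sketch_fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __put_user(0, uaddr);
	if (ret == 0 && ((unsigned long)uaddr & PAGE_MASK) !=
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);	/* spans pages: touch the last one too */

	return ret;
}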
@@ -668,18 +670,14 @@ fast_shmem_write(struct page **pages,
 		char __user *data,
 		int length)
 {
-	char __iomem *vaddr;
-	unsigned long unwritten;
+	int unwritten;
+	char *vaddr;
 
 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
-	if (vaddr == NULL)
-		return -ENOMEM;
 	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
 	kunmap_atomic(vaddr, KM_USER0);
-	if (unwritten)
-		return -EFAULT;
 
-	return 0;
+	return unwritten ? -EFAULT : 0;
 }
 
 /**
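Same simplification as the read path; unwritten also narrows from unsigned long to int, which is safe because the inatomic copy helpers return the number of bytes left uncopied and length here is at most a page.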
@@ -1078,6 +1076,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+				      args->size);
+	if (ret) {
+		ret = -EFAULT;
+		goto out;
+	}
+
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
 	 * different detiling behavior between reading and writing.
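Why the commit message only claims the fast paths become "more likely": the prefault populates the page tables but pins nothing, so the pages can be reclaimed again before the copy runs. The ioctls therefore keep their existing fallback, roughly (condensed from the surrounding code of this era, not a verbatim excerpt):

	ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);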