linux/drivers/gpu/drm/i915/gt/shmem_utils.c
Venkata Ramana Nayana 78b2eb8a1f drm/i915/gt: Retain default context state across shrinking
As we use a shmemfs file to hold the context state, when not in use it
may be swapped out, such as across suspend. Since we wrote into the
shmemfs without marking the pages as dirty, the contents may be dropped
instead of being written back to swap. On re-using the shmemfs file,
such as creating a new context after resume, the contents of that file
were likely garbage and so the new context could then hang the GPU.

Simply mark the page as being written when copying into the shmemfs
file, and the new contents will be retained across swapout.
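
The fix itself is the set_page_dirty() call in the write path. As a
minimal sketch of the pattern, assuming a hypothetical helper (the
driver's real write path is __shmem_rw() in the file below):

  /* Sketch: copy @len (<= PAGE_SIZE) bytes into page @idx of a shmemfs
   * mapping, keeping the contents swap-safe.
   */
  static int example_shmem_write_page(struct address_space *mapping,
                                      pgoff_t idx, const void *src, size_t len)
  {
          struct page *page;
          void *vaddr;

          /* Look up the backing page, allocating or swapping in as needed. */
          page = shmem_read_mapping_page_gfp(mapping, idx, GFP_KERNEL);
          if (IS_ERR(page))
                  return PTR_ERR(page);

          vaddr = kmap(page);
          memcpy(vaddr, src, len);
          set_page_dirty(page); /* without this, swapout discards the copy */
          kunmap(page);
          put_page(page);
          return 0;
  }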

Fixes: be1cb55a07 ("drm/i915/gt: Keep a no-frills swappable copy of the default context state")
Cc: Sudeep Dutt <sudeep.dutt@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Ramalingam C <ramalingam.c@intel.com>
Signed-off-by: CQ Tang <cq.tang@intel.com>
Signed-off-by: Venkata Ramana Nayana <venkata.ramana.nayana@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: <stable@vger.kernel.org> # v5.8+
Link: https://patchwork.freedesktop.org/patch/msgid/20201127120718.454037-161-matthew.auld@intel.com
(cherry picked from commit a9d71f76cc)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
2020-12-02 17:05:58 -08:00


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

#include "gem/i915_gem_object.h"
#include "shmem_utils.h"
struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
        struct file *file;
        int err;

        file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
        if (IS_ERR(file))
                return file;

        err = shmem_write(file, 0, data, len);
        if (err) {
                fput(file);
                return ERR_PTR(err);
        }

        return file;
}
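
/*
 * A shmem-backed GEM object already has a shmemfs file, so just take an
 * extra reference on it; any other object is copied through a CPU
 * mapping into a fresh shmemfs file.
 */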
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
        struct file *file;
        void *ptr;

        if (obj->ops == &i915_gem_shmem_ops) {
                file = obj->base.filp;
                atomic_long_inc(&file->f_count);
                return file;
        }

        ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(ptr))
                return ERR_CAST(ptr);

        file = shmem_create_from_data("", ptr, obj->base.size);
        i915_gem_object_unpin_map(obj);

        return file;
}
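
/*
 * Pin every page of the file and return a contiguous kernel mapping.
 * VM_MAP_PUT_PAGES hands the page references (and the pages[] array)
 * over to vfree(), so shmem_unpin_map() only has to vfree() the pointer.
 * The mapping is marked unevictable while pinned so reclaim does not
 * swap the pages out from under us.
 */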
void *shmem_pin_map(struct file *file)
{
        struct page **pages;
        size_t n_pages, i;
        void *vaddr;

        n_pages = file->f_mapping->host->i_size >> PAGE_SHIFT;
        pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        for (i = 0; i < n_pages; i++) {
                pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
                                                       GFP_KERNEL);
                if (IS_ERR(pages[i]))
                        goto err_page;
        }

        vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
        if (!vaddr)
                goto err_page;
        mapping_set_unevictable(file->f_mapping);
        return vaddr;

err_page:
        /* i is unsigned, so "while (--i >= 0)" would never terminate */
        while (i--)
                put_page(pages[i]);
        kvfree(pages);
        return NULL;
}

void shmem_unpin_map(struct file *file, void *ptr)
{
        mapping_clear_unevictable(file->f_mapping);
        vfree(ptr);
}
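
/*
 * Copy between a kernel buffer and the file, one page at a time through
 * a temporary kmap(). Writes must dirty each page, or the new contents
 * are lost when the page is swapped out (the bug this commit fixes).
 */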
static int __shmem_rw(struct file *file, loff_t off,
                      void *ptr, size_t len,
                      bool write)
{
        unsigned long pfn;

        for (pfn = off >> PAGE_SHIFT; len; pfn++) {
                unsigned int this =
                        min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
                struct page *page;
                void *vaddr;

                page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
                                                   GFP_KERNEL);
                if (IS_ERR(page))
                        return PTR_ERR(page);

                vaddr = kmap(page);
                if (write) {
                        memcpy(vaddr + offset_in_page(off), ptr, this);
                        set_page_dirty(page); /* retain contents across swapout */
                } else {
                        memcpy(ptr, vaddr + offset_in_page(off), this);
                }
                mark_page_accessed(page);
                kunmap(page);
                put_page(page);

                len -= this;
                ptr += this;
                off = 0;
        }

        return 0;
}

int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
        return __shmem_rw(file, off, dst, len, false);
}

int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
        return __shmem_rw(file, off, src, len, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "st_shmem_utils.c"
#endif