linux/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
Matthew Auld 1629224324 drm/i915/lmem: add the fake lmem region
Intended for upstream testing so that we can still exercise the LMEM
plumbing and !i915_ggtt_has_aperture paths. Smoke tested on a Skull
Canyon device. This works by allocating an intel_memory_region for a
reserved portion of system memory, which we treat like LMEM. For the
LMEMBAR we steal the aperture and map it 1:1 to the stolen region.
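
As a rough, purely illustrative sketch of the idea (the helper name
fake_lmem_init_iomap is made up here and is not part of the patch; it
only assumes the intel_memory_region iomap member used elsewhere in
this file), the fake region's iomap simply points at the reserved
system memory range instead of a real LMEMBAR:

  /* Illustrative only: back the fake LMEMBAR with the reserved range. */
  static int fake_lmem_init_iomap(struct intel_memory_region *mem,
                                  resource_size_t reserved_start,
                                  unsigned long size)
  {
          /* CPU access through mem->iomap then lands in the reserved RAM. */
          if (!io_mapping_init_wc(&mem->iomap, reserved_start, size))
                  return -EIO;

          return 0;
  }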

To enable, simply set the i915 modparam fake_lmem_start= on the kernel
cmdline with the start of the reserved region (see memmap=). The size
of the region we can use is determined by the size of the mappable
aperture, so the reserved region should be >= mappable_end. For now we
only enable this for the selftests. Depends on CONFIG_DRM_I915_UNSTABLE
being enabled.

eg. memmap=2G$16G i915.fake_lmem_start=0x400000000
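
Very roughly, and again only as an illustrative sketch rather than the
actual patch (the reserved resource name is hypothetical), the usable
size ends up bounded by the mappable aperture being stolen:

  /* Illustrative only: the fake region can cover at most the mappable
   * aperture, so the memmap= reservation must span >= ggtt->mappable_end
   * bytes starting at fake_lmem_start.
   */
  if (resource_size(&reserved) < ggtt->mappable_end)
          return -ENOSPC; /* reserved region too small */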

v2: make fake_lmem_start an i915 modparam

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Arkadiusz Hiler <arkadiusz.hiler@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191030173320.8850-1-matthew.auld@intel.com
2019-10-31 20:41:47 +00:00


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"

const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
	.flags = I915_GEM_OBJECT_HAS_IOMEM,

	.get_pages = i915_gem_object_get_pages_buddy,
	.put_pages = i915_gem_object_put_pages_buddy,
	.release = i915_gem_object_release_memory_region,
};
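
/*
 * The helpers below resolve an object page to an offset inside the
 * backing memory region (its dma address minus the region start) and
 * map that offset through the region's io_mapping.
 */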
/* XXX: Time to vfunc your life up? */
void __iomem *
i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
				 unsigned long n)
{
	resource_size_t offset;

	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
}
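
/*
 * Atomic variant of the above: the caller cannot sleep while the
 * mapping is held and must release it with io_mapping_unmap_atomic().
 */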
void __iomem *
i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
					unsigned long n)
{
	resource_size_t offset;

	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
}
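
/*
 * Map @size bytes of the object starting at page @n. Only valid for
 * contiguous objects, since a single WC mapping can only cover pages
 * that are physically adjacent within the region.
 */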
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
			    unsigned long n,
			    unsigned long size)
{
	resource_size_t offset;

	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));

	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}

bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_lmem_obj_ops;
}
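
/*
 * Allocate a new GEM object backed by the LMEM memory region, with
 * @flags forwarded to the region allocator.
 */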
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
			    resource_size_t size,
			    unsigned int flags)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
					     size, flags);
}
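
/*
 * Low-level LMEM object constructor: sizes beyond what the region's
 * buddy allocator can hand out in a single allocation are rejected,
 * and the object is set up as uncached with WC/GTT read domains.
 */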
struct drm_i915_gem_object *
__i915_gem_lmem_object_create(struct intel_memory_region *mem,
			      resource_size_t size,
			      unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;

	if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);

	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

	i915_gem_object_init_memory_region(obj, mem, flags);

	return obj;
}