drm/i915: Use to_gt() helper for GGTT accesses
GGTT is currently available both through i915->ggtt and gt->ggtt, and we
eventually want to get rid of the i915->ggtt one. Use to_gt() for all
i915->ggtt accesses to help with the future refactoring.

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220104223550.56135-1-andi.shyti@linux.intel.com
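For readers outside the i915 tree, every hunk below applies the same mechanical substitution: i915->ggtt.<field> becomes to_gt(i915)->ggtt-><field>. The following is a minimal compilable sketch of why the dot turns into an arrow, assuming to_gt() is the trivial accessor it was when this patch landed (returning &i915->gt); the struct layouts, the num_fences field choice, and the helper names other than to_gt() (init_sketch, num_fences_old, num_fences_new) are simplified stand-ins for illustration, not the real i915 definitions:

/* Simplified stand-ins for the real i915 types; only the fields used here. */
struct i915_ggtt {
	unsigned int num_fences;
};

struct intel_gt {
	struct i915_ggtt *ggtt;	/* points at the GGTT owned by the device */
};

struct drm_i915_private {
	struct i915_ggtt ggtt;	/* legacy embedded copy, slated for removal */
	struct intel_gt gt;
};

/* The helper this patch leans on; the real one lives in i915_drv.h. */
static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
	return &i915->gt;
}

/* Hypothetical wiring, done once at init: gt->ggtt aliases the embedded GGTT,
 * so both paths read the same data.
 */
static void init_sketch(struct drm_i915_private *i915)
{
	i915->gt.ggtt = &i915->ggtt;
}

/* Before: callers reach the GGTT through the embedded member ('.'). */
static unsigned int num_fences_old(struct drm_i915_private *i915)
{
	return i915->ggtt.num_fences;
}

/* After: the same data, routed through the GT ('->'), so the embedded
 * member can later be removed without touching these callers again.
 */
static unsigned int num_fences_new(struct drm_i915_private *i915)
{
	return to_gt(i915)->ggtt->num_fences;
}

Once no caller reaches the embedded copy directly, the i915->ggtt member can be dropped and gt->ggtt pointed at a GGTT owned elsewhere, which is the future refactoring the commit message anticipates.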
@@ -84,7 +84,7 @@ static int vgpu_gem_get_pages(
 		kfree(st);
 		return ret;
 	}
-	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+	gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
 		(fb_info->start >> PAGE_SHIFT);
 	for_each_sg(st->sgl, sg, page_num, i) {
 		dma_addr_t dma_addr =

@@ -391,9 +391,9 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	intel_wakeref_t wakeref;
 
 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
-		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
+		   swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_x));
 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
-		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
+		   swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y));
 
 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
 		seq_puts(m, "L-shaped memory detected\n");

@@ -1142,7 +1142,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	intel_suspend_hw(dev_priv);
 
-	i915_ggtt_suspend(&dev_priv->ggtt);
+	i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
 
 	i915_save_display(dev_priv);
 

@@ -1257,7 +1257,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	if (ret)
 		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
 
-	i915_ggtt_resume(&dev_priv->ggtt);
+	i915_ggtt_resume(to_gt(dev_priv)->ggtt);
 
 	intel_dmc_ucode_resume(dev_priv);
 

@@ -1958,7 +1958,7 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
-	return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+	return to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
 		i915_gem_object_is_tiled(obj);
 }
 

@@ -88,7 +88,8 @@ int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file)
 {
-	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	struct drm_i915_gem_get_aperture *args = data;
 	struct i915_vma *vma;
 	u64 pinned;

@@ -289,7 +290,7 @@ static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
 					     bool write)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	struct i915_vma *vma;
 	struct i915_gem_ww_ctx ww;
 	int ret;

@@ -350,7 +351,7 @@ static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
 				 struct i915_vma *vma)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 
 	i915_gem_object_unpin_pages(obj);
 	if (drm_mm_node_allocated(node)) {

@@ -366,7 +367,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		   const struct drm_i915_gem_pread *args)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	intel_wakeref_t wakeref;
 	struct drm_mm_node node;
 	void __user *user_data;

@@ -522,7 +523,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 			 const struct drm_i915_gem_pwrite *args)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
 	intel_wakeref_t wakeref;
 	struct drm_mm_node node;

@@ -823,7 +824,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 	 */
 
 	list_for_each_entry_safe(obj, on,
-				 &i915->ggtt.userfault_list, userfault_link)
+				 &to_gt(i915)->ggtt->userfault_list, userfault_link)
 		__i915_gem_object_release_mmap_gtt(obj);
 
 	/*

@@ -831,8 +832,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 	 * in use by hardware (i.e. they are pinned), we should not be powering
 	 * down! All other fences will be reacquired by the user upon waking.
 	 */
-	for (i = 0; i < i915->ggtt.num_fences; i++) {
-		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+	for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
+		struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];
 
 		/*
 		 * Ideally we want to assert that the fence register is not

@@ -873,7 +874,7 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
 			    u64 size, u64 alignment, u64 flags)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	struct i915_vma *vma;
 	int ret;
 

@@ -1123,7 +1124,7 @@ err_unlock:
 
 	/* Minimal basic recovery for KMS */
 	ret = i915_ggtt_enable_hw(dev_priv);
-	i915_ggtt_resume(&dev_priv->ggtt);
+	i915_ggtt_resume(to_gt(dev_priv)->ggtt);
 	intel_init_clock_gating(dev_priv);
 }
 

@@ -1146,7 +1147,7 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
 
 void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
 {
-	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
+	intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);
 
 	i915_gem_suspend_late(dev_priv);
 	intel_gt_driver_remove(to_gt(dev_priv));

@@ -56,7 +56,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 
 	/* XXX This does not prevent more requests being submitted! */
 	if (unlikely(ggtt->do_idle_maps))

@@ -103,7 +103,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
 	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
 	GEM_BUG_ON(range_overflows(offset, size, vm->total));
-	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
+	GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
 	GEM_BUG_ON(drm_mm_node_allocated(node));
 
 	node->size = size;

@@ -201,7 +201,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 	GEM_BUG_ON(start >= end);
 	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
 	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
-	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
+	GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
 	GEM_BUG_ON(drm_mm_node_allocated(node));
 
 	if (unlikely(range_overflows(start, size, end)))

@@ -31,7 +31,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = pdev->revision;
 		break;
 	case I915_PARAM_NUM_FENCES_AVAIL:
-		value = i915->ggtt.num_fences;
+		value = to_gt(i915)->ggtt->num_fences;
 		break;
 	case I915_PARAM_HAS_OVERLAY:
 		value = !!i915->overlay;

@@ -1630,8 +1630,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
 	struct drm_i915_gem_object *bo;
 	struct i915_vma *vma;
 	const u64 delay_ticks = 0xffffffffffffffff -
-		intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
+		intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
					      atomic64_read(&stream->perf->noa_programming_delay));
 	const u32 base = stream->engine->mmio_base;
 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
 	u32 *batch, *ts0, *cs, *jump;

@@ -3542,7 +3542,7 @@ err:
 
 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
 {
-	return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
+	return intel_gt_clock_interval_to_ns(to_gt(perf->i915),
					     2ULL << exponent);
 }
 