Merge tag 'drm-xe-fixes-2024-11-14' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

Driver Changes:
- Fix unlock on exec ioctl error path (Matthew Brost)
- Fix hibernation on LNL due to ggtt getting lost (Matthew Brost / Matthew Auld)
- Fix missing runtime PM in OA release (Ashutosh)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/5ntcf2ssmmvo5dsf2mdcee4guwwmpbm3xrlufgt2pdfmznzjo3@62ygo3bxkock
commit 21c1c6c7d7
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -886,8 +886,8 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
 	if (WARN_ON(!xe_bo_is_pinned(bo)))
 		return -EINVAL;
 
-	if (WARN_ON(!xe_bo_is_vram(bo)))
-		return -EINVAL;
+	if (!xe_bo_is_vram(bo))
+		return 0;
 
 	ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
 	if (ret)
@@ -937,6 +937,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
 		.interruptible = false,
 	};
 	struct ttm_resource *new_mem;
+	struct ttm_place *place = &bo->placements[0];
 	int ret;
 
 	xe_bo_assert_held(bo);
@@ -947,9 +948,15 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
 	if (WARN_ON(!xe_bo_is_pinned(bo)))
 		return -EINVAL;
 
-	if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
+	if (WARN_ON(xe_bo_is_vram(bo)))
 		return -EINVAL;
 
+	if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
+		return -EINVAL;
+
+	if (!mem_type_is_vram(place->mem_type))
+		return 0;
+
 	ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
 	if (ret)
 		return ret;
@@ -1719,6 +1726,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
 
 int xe_bo_pin(struct xe_bo *bo)
 {
+	struct ttm_place *place = &bo->placements[0];
 	struct xe_device *xe = xe_bo_device(bo);
 	int err;
 
@@ -1749,21 +1757,21 @@ int xe_bo_pin(struct xe_bo *bo)
 	 */
 	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
	    bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
-		struct ttm_place *place = &(bo->placements[0]);
-
 		if (mem_type_is_vram(place->mem_type)) {
 			xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
 
 			place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
				       vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
 			place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
-
-			spin_lock(&xe->pinned.lock);
-			list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
-			spin_unlock(&xe->pinned.lock);
 		}
 	}
 
+	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+		spin_lock(&xe->pinned.lock);
+		list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+		spin_unlock(&xe->pinned.lock);
+	}
+
 	ttm_bo_pin(&bo->ttm);
 
 	/*
@@ -1809,23 +1817,18 @@ void xe_bo_unpin_external(struct xe_bo *bo)
 
 void xe_bo_unpin(struct xe_bo *bo)
 {
+	struct ttm_place *place = &bo->placements[0];
 	struct xe_device *xe = xe_bo_device(bo);
 
 	xe_assert(xe, !bo->ttm.base.import_attach);
 	xe_assert(xe, xe_bo_is_pinned(bo));
 
-	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
-	    bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
-		struct ttm_place *place = &(bo->placements[0]);
-
-		if (mem_type_is_vram(place->mem_type)) {
-			spin_lock(&xe->pinned.lock);
-			xe_assert(xe, !list_empty(&bo->pinned_link));
-			list_del_init(&bo->pinned_link);
-			spin_unlock(&xe->pinned.lock);
-		}
+	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+		spin_lock(&xe->pinned.lock);
+		xe_assert(xe, !list_empty(&bo->pinned_link));
+		list_del_init(&bo->pinned_link);
+		spin_unlock(&xe->pinned.lock);
 	}
-
 	ttm_bo_unpin(&bo->ttm);
 }
 
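Taken together, the xe_bo_pin() and xe_bo_unpin() hunks above make the pinned-BO bookkeeping symmetric: a pinned buffer is added to (and later removed from) xe->pinned.kernel_bo_present whenever it either lives in VRAM or carries a GGTT mapping, so GGTT-mapped system-memory BOs are also saved and restored across hibernation. A minimal sketch of that shared condition, assuming the xe driver headers are in scope; the helper name is invented here, the patch open-codes the test in both functions:

/* Hypothetical helper, not part of the patch: the condition both
 * xe_bo_pin() and xe_bo_unpin() now use to decide whether a pinned BO
 * must be tracked for save/restore across suspend and hibernation. */
static bool xe_bo_needs_pinned_tracking(struct xe_bo *bo)
{
	struct ttm_place *place = &bo->placements[0];

	/* VRAM contents must be copied out before suspend, and
	 * GGTT-mapped system-memory BOs need their GGTT entries
	 * rebuilt on resume. */
	return mem_type_is_vram(place->mem_type) ||
	       (bo->flags & XE_BO_FLAG_GGTT);
}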
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -34,14 +34,22 @@ int xe_bo_evict_all(struct xe_device *xe)
 	u8 id;
 	int ret;
 
-	if (!IS_DGFX(xe))
-		return 0;
-
 	/* User memory */
-	for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
+	for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
 		struct ttm_resource_manager *man =
			ttm_manager_type(bdev, mem_type);
 
+		/*
+		 * On igpu platforms with flat CCS we need to ensure we save and restore any CCS
+		 * state since this state lives inside graphics stolen memory which doesn't survive
+		 * hibernation.
+		 *
+		 * This can be further improved by only evicting objects that we know have actually
+		 * used a compression enabled PAT index.
+		 */
+		if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
+			continue;
+
 		if (man) {
 			ret = ttm_resource_manager_evict_all(bdev, man);
 			if (ret)
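The comment added in this hunk carries the reasoning for widening the eviction loop: on igpu platforms with flat CCS, compression state lives in graphics stolen memory, which does not survive hibernation, so TT (system memory) objects now have to be evicted as well so their CCS state is saved with them. A small sketch of that decision as a standalone predicate, assuming the xe driver headers; the patch keeps the check inline and this helper name is invented:

/* Illustrative only, not a helper from the patch: when may the
 * pre-suspend eviction loop in xe_bo_evict_all() skip a memory type? */
static bool xe_evict_can_skip_mem_type(struct xe_device *xe, u32 mem_type)
{
	/* TT (system memory) only needs evicting on igpu with flat CCS,
	 * where compression metadata sits in stolen memory and would be
	 * lost across hibernation. */
	if (mem_type == XE_PL_TT)
		return IS_DGFX(xe) || !xe_device_has_flat_ccs(xe);

	/* VRAM placements are always evicted to system memory. */
	return false;
}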
@@ -125,9 +133,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
 	struct xe_bo *bo;
 	int ret;
 
-	if (!IS_DGFX(xe))
-		return 0;
-
 	spin_lock(&xe->pinned.lock);
 	for (;;) {
 		bo = list_first_entry_or_null(&xe->pinned.evicted,
@@ -159,7 +164,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
 		 * should setup the iosys map.
 		 */
 		xe_assert(xe, !iosys_map_is_null(&bo->vmap));
-		xe_assert(xe, xe_bo_is_vram(bo));
 
 		xe_bo_put(bo);
 
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -203,14 +203,14 @@ retry:
 		write_locked = false;
 	}
 	if (err)
-		goto err_syncs;
+		goto err_hw_exec_mode;
 
 	if (write_locked) {
 		err = xe_vm_userptr_pin(vm);
 		downgrade_write(&vm->lock);
 		write_locked = false;
 		if (err)
-			goto err_hw_exec_mode;
+			goto err_unlock_list;
 	}
 
 	if (!args->num_batch_buffer) {
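The two goto targets change because the error labels at the end of the exec ioctl unwind state in reverse order of acquisition: jumping to a label too early in the chain skips a release, jumping too late releases something that was never taken. Here the lock-acquisition failure is rerouted to err_hw_exec_mode (which, as the label name suggests, undoes the hw engine exec mode taken earlier), and the userptr-pin failure is rerouted to err_unlock_list so the VM lock taken just above is released as well, matching the "Fix unlock on exec ioctl error path" note. A generic, self-contained sketch of the idiom (hypothetical demo_* names, not the actual xe_exec.c cleanup chain):

/* Hypothetical illustration of the goto-unwind ladder: each failure
 * jumps to the label that undoes exactly what was acquired so far. */
struct demo_ctx { int locked, hw_mode; };

static int  demo_enter_hw_mode(struct demo_ctx *c) { c->hw_mode = 1; return 0; }
static void demo_exit_hw_mode(struct demo_ctx *c)  { c->hw_mode = 0; }
static int  demo_take_lock(struct demo_ctx *c)     { c->locked = 1; return 0; }
static void demo_release_lock(struct demo_ctx *c)  { c->locked = 0; }
static int  demo_submit(struct demo_ctx *c)        { (void)c; return -1; /* force unwind */ }

static int demo_ioctl(struct demo_ctx *ctx)
{
	int err;

	err = demo_enter_hw_mode(ctx);		/* step 1 */
	if (err)
		return err;			/* nothing to undo yet */

	err = demo_take_lock(ctx);		/* step 2 */
	if (err)
		goto err_hw_mode;		/* undo step 1 only */

	err = demo_submit(ctx);			/* step 3 */
	if (err)
		goto err_unlock;		/* undo step 2, then step 1 */

	demo_release_lock(ctx);
	demo_exit_hw_mode(ctx);
	return 0;

err_unlock:
	demo_release_lock(ctx);
err_hw_mode:
	demo_exit_hw_mode(ctx);
	return err;
}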
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1206,9 +1206,11 @@ static int xe_oa_release(struct inode *inode, struct file *file)
 	struct xe_oa_stream *stream = file->private_data;
 	struct xe_gt *gt = stream->gt;
 
+	xe_pm_runtime_get(gt_to_xe(gt));
 	mutex_lock(&gt->oa.gt_lock);
 	xe_oa_destroy_locked(stream);
 	mutex_unlock(&gt->oa.gt_lock);
+	xe_pm_runtime_put(gt_to_xe(gt));
 
 	/* Release the reference the OA stream kept on the driver */
 	drm_dev_put(&gt_to_xe(gt)->drm);
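This last hunk is the "missing runtime PM in OA release" fix from the changelog: destroying the OA stream can touch the hardware, so the release path must hold a runtime PM reference to keep the device awake while it runs. A minimal sketch of the bracketing pattern, assuming the xe driver headers; the wrapper and its callback are invented stand-ins, not real xe API:

/* Illustrative only: the xe runtime-PM bracketing that the xe_oa_release()
 * fix applies.  The callback stands in for work that may touch the GPU
 * while the device could otherwise be runtime suspended. */
static void pm_bracketed_teardown(struct xe_device *xe, struct mutex *lock,
				  void (*teardown)(void *data), void *data)
{
	xe_pm_runtime_get(xe);		/* wake the device and keep it awake */
	mutex_lock(lock);
	teardown(data);
	mutex_unlock(lock);
	xe_pm_runtime_put(xe);		/* allow runtime suspend again */
}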