habanalabs: fix double unlock on error in map_device_va()
If hl_mmu_prefetch_cache_range() fails then this code calls
mutex_unlock(&ctx->mmu_lock) when it's no longer holding the mutex.
Fixes: 9e495e2400 ("habanalabs: do MMU prefetch as deferred work")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
commit a43a9f6777
parent 7e724422a7
@@ -1245,16 +1245,16 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
 	rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
 	if (rc) {
 		dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
+		mutex_unlock(&ctx->mmu_lock);
 		goto map_err;
 	}
 
 	rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
 				ctx->asid, ret_vaddr, phys_pg_pack->total_size);
+	mutex_unlock(&ctx->mmu_lock);
 	if (rc)
 		goto map_err;
 
-	mutex_unlock(&ctx->mmu_lock);
-
 	/*
 	 * prefetch is done upon user's request. it is performed in WQ as and so can
 	 * be outside the MMU lock. the operation itself is already protected by the mmu lock
@@ -1283,8 +1283,6 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
 	return rc;
 
 map_err:
-	mutex_unlock(&ctx->mmu_lock);
-
 	if (add_va_block(hdev, va_range, ret_vaddr,
 			ret_vaddr + phys_pg_pack->total_size - 1))
 		dev_warn(hdev->dev,
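
To make the double-unlock pattern concrete outside the driver, here is a minimal standalone sketch of the buggy shape and of the shape the patch moves to. It uses pthread mutexes rather than the kernel mutex API, and every identifier apart from the map_err label is illustrative, not taken from the habanalabs code:

/*
 * Standalone sketch of the double-unlock bug class fixed above.
 * pthread mutexes stand in for the kernel mutex API; do_map() and
 * do_prefetch() are illustrative stubs, not driver functions.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_map(void)      { return 0; }  /* mapping succeeds        */
static int do_prefetch(void) { return -1; } /* deferred prefetch fails */

/* Buggy shape: the success path already dropped the lock, but a later
 * failure jumps to an error label that unlocks again. */
static int map_buggy(void)
{
	int rc;

	pthread_mutex_lock(&lock);

	rc = do_map();
	if (rc)
		goto map_err;

	pthread_mutex_unlock(&lock);

	rc = do_prefetch();		/* runs outside the lock */
	if (rc)
		goto map_err;		/* BUG: lock is no longer held here */

	return 0;

map_err:
	pthread_mutex_unlock(&lock);	/* second unlock on the prefetch path */
	return rc;
}

/* Fixed shape, mirroring the patch: each path unlocks exactly once before
 * it can reach map_err, so the error label does no unlocking at all. */
static int map_fixed(void)
{
	int rc;

	pthread_mutex_lock(&lock);

	rc = do_map();
	pthread_mutex_unlock(&lock);
	if (rc)
		goto map_err;

	rc = do_prefetch();		/* still outside the lock */
	if (rc)
		goto map_err;

	return 0;

map_err:
	/* cleanup that does not need the lock */
	return rc;
}

int main(void)
{
	(void)map_buggy;		/* not called: it would double-unlock */
	printf("fixed path returns %d\n", map_fixed());
	return 0;
}

The patch itself takes the second shape: the two remaining unlock sites sit on the mapping and cache-invalidation paths, so by the time control reaches map_err the lock has already been dropped and the label only has to return the VA block (the add_va_block() call visible in the second hunk).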