habanalabs: refactor MMU locks code

Remove mmu_cache_lock, as it protects a section that is already
protected by mmu_lock.

In addition, wrap the MMU cache invalidate calls in hl_vm_ctx_fini
with mmu_lock, as sketched below.

Signed-off-by: Ohad Sharabi <osharabi@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
Author: Ohad Sharabi <osharabi@habana.ai>, 2020-11-26 09:39:26 +02:00 (committed by Oded Gabbay)
parent 4c998836d4
commit cb6ef0ee6d
5 changed files with 4 additions and 20 deletions
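
For illustration only, below is a minimal kernel-style sketch of the locking
model this change relies on; the demo_* names are hypothetical stand-ins (in
the driver, the caller is hl_vm_ctx_fini() and the callback is
hdev->asic_funcs->mmu_invalidate_cache()). Since every invalidation path runs
with the context's mmu_lock held, the per-device mmu_cache_lock taken inside
the ASIC callbacks serialized nothing extra and can be removed.

#include <linux/mutex.h>

struct demo_ctx {
	struct mutex mmu_lock;		/* stands in for ctx->mmu_lock */
};

/* Done once when the context is created. */
static void demo_ctx_init(struct demo_ctx *ctx)
{
	mutex_init(&ctx->mmu_lock);
}

/*
 * Stand-in for the ASIC cache-invalidation callback: after this patch it
 * takes no lock of its own and relies on the caller holding mmu_lock.
 */
static void demo_mmu_invalidate_cache(struct demo_ctx *ctx)
{
	/* write the STLB invalidation registers and poll for completion */
}

/* Stand-in for hl_vm_ctx_fini(): invalidate once, under mmu_lock. */
static void demo_vm_ctx_fini(struct demo_ctx *ctx)
{
	/* ... unmap whatever the user left mapped ... */

	mutex_lock(&ctx->mmu_lock);
	demo_mmu_invalidate_cache(ctx);
	mutex_unlock(&ctx->mmu_lock);
}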

drivers/misc/habanalabs/common/device.c

@@ -373,7 +373,6 @@ static int device_early_init(struct hl_device *hdev)
 	mutex_init(&hdev->send_cpu_message_lock);
 	mutex_init(&hdev->debug_lock);
-	mutex_init(&hdev->mmu_cache_lock);
 	INIT_LIST_HEAD(&hdev->cs_mirror_list);
 	spin_lock_init(&hdev->cs_mirror_lock);
 	INIT_LIST_HEAD(&hdev->fpriv_list);
@@ -414,7 +413,6 @@ static void device_early_fini(struct hl_device *hdev)
 {
 	int i;
-	mutex_destroy(&hdev->mmu_cache_lock);
 	mutex_destroy(&hdev->debug_lock);
 	mutex_destroy(&hdev->send_cpu_message_lock);

drivers/misc/habanalabs/common/habanalabs.h

@@ -1764,7 +1764,6 @@ struct hl_mmu_funcs {
  * @asic_funcs: ASIC specific functions.
  * @asic_specific: ASIC specific information to use only from ASIC files.
  * @vm: virtual memory manager for MMU.
- * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context.
  * @hwmon_dev: H/W monitor device.
  * @pm_mng_profile: current power management profile.
  * @hl_chip_info: ASIC's sensors information.
@@ -1879,7 +1878,6 @@ struct hl_device {
 	const struct hl_asic_funcs *asic_funcs;
 	void *asic_specific;
 	struct hl_vm vm;
-	struct mutex mmu_cache_lock;
 	struct device *hwmon_dev;
 	enum hl_pm_mng_profile pm_mng_profile;
 	struct hwmon_chip_info *hl_chip_info;

drivers/misc/habanalabs/common/memory.c

@@ -1895,10 +1895,14 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
 		unmap_device_va(ctx, hnode->vaddr, true);
 	}
+	mutex_lock(&ctx->mmu_lock);
 	/* invalidate the cache once after the unmapping loop */
 	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
 	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
+	mutex_unlock(&ctx->mmu_lock);
 	spin_lock(&vm->idr_lock);
 	idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
 		if (phys_pg_list->asid == ctx->asid) {

drivers/misc/habanalabs/gaudi/gaudi.c

@@ -7330,8 +7330,6 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
 	else
 		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
-	mutex_lock(&hdev->mmu_cache_lock);
 	/* L0 & L1 invalidation */
 	WREG32(mmSTLB_INV_PS, 3);
 	WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
@@ -7347,8 +7345,6 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
 	WREG32(mmSTLB_INV_SET, 0);
-	mutex_unlock(&hdev->mmu_cache_lock);
 	if (rc) {
 		dev_err_ratelimited(hdev->dev,
 			"MMU cache invalidation timeout\n");
@@ -7371,8 +7367,6 @@ static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
 			hdev->hard_reset_pending)
 		return 0;
-	mutex_lock(&hdev->mmu_cache_lock);
 	if (hdev->pldm)
 		timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
 	else
@@ -7400,8 +7394,6 @@ static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
 			1000,
 			timeout_usec);
-	mutex_unlock(&hdev->mmu_cache_lock);
 	if (rc) {
 		dev_err_ratelimited(hdev->dev,
 			"MMU cache invalidation timeout\n");

drivers/misc/habanalabs/goya/goya.c

@@ -5073,8 +5073,6 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
 	else
 		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
-	mutex_lock(&hdev->mmu_cache_lock);
 	/* L0 & L1 invalidation */
 	WREG32(mmSTLB_INV_ALL_START, 1);
@@ -5086,8 +5084,6 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
 		1000,
 		timeout_usec);
-	mutex_unlock(&hdev->mmu_cache_lock);
 	if (rc) {
 		dev_err_ratelimited(hdev->dev,
 			"MMU cache invalidation timeout\n");
@@ -5117,8 +5113,6 @@ static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
 	else
 		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
-	mutex_lock(&hdev->mmu_cache_lock);
 	/*
 	 * TODO: currently invalidate entire L0 & L1 as in regular hard
 	 * invalidation. Need to apply invalidation of specific cache lines with
@@ -5141,8 +5135,6 @@ static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
 		1000,
 		timeout_usec);
-	mutex_unlock(&hdev->mmu_cache_lock);
 	if (rc) {
 		dev_err_ratelimited(hdev->dev,
 			"MMU cache invalidation timeout\n");