habanalabs: set command buffer host VA dynamically

Set the addresses for userspace command buffers dynamically instead
of hard-coding them. There is no reason for them to be hard-coded.

Signed-off-by: Dafna Hirschfeld <dhirschfeld@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
Authored by Dafna Hirschfeld on 2022-05-23 08:59:19 +03:00, committed by Oded Gabbay
parent 0263256791
commit 262042af13
5 changed files with 18 additions and 18 deletions
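
The gist of the change: instead of seeding the per-context CB VA gen_pool with the hard-coded VA_HOST_SPACE_USER_MAPPED_CB window, a 4 GB block is reserved from the context's host VA range when the pool is initialized and released again when the pool is destroyed. A condensed sketch of the new flow, pieced together from the hunks below (driver-internal calls only, not a standalone compilable unit):

	/* hl_cb_va_pool_init(): carve a 4 GB block out of the host VA range ... */
	ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
					CB_VA_POOL_SIZE, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);

	/* ... and hand it to the gen_pool that backs per-CB device VA allocations */
	rc = gen_pool_add(ctx->cb_va_pool, ctx->cb_va_pool_base, CB_VA_POOL_SIZE, -1);

	/* hl_cb_va_pool_fini(): return the block to the host VA range */
	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);

If gen_pool_add() fails, the freshly reserved block is unreserved through the new err_unreserve_va_block label before the pool itself is destroyed.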


@@ -12,6 +12,8 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
+#define CB_VA_POOL_SIZE		(4UL * SZ_1G)
+
 static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 {
 	struct hl_device *hdev = ctx->hdev;
@@ -25,7 +27,7 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 	if (!hdev->supports_cb_mapping) {
 		dev_err_ratelimited(hdev->dev,
-			"Cannot map CB because no VA range is allocated for CB mapping\n");
+			"Mapping a CB to the device's MMU is not supported\n");
 		return -EINVAL;
 	}
@@ -566,16 +568,23 @@ int hl_cb_va_pool_init(struct hl_ctx *ctx)
 		return -ENOMEM;
 	}
 
-	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
-			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
+	ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
+					CB_VA_POOL_SIZE, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
+	if (!ctx->cb_va_pool_base) {
+		rc = -ENOMEM;
+		goto err_pool_destroy;
+	}
+	rc = gen_pool_add(ctx->cb_va_pool, ctx->cb_va_pool_base, CB_VA_POOL_SIZE, -1);
 	if (rc) {
 		dev_err(hdev->dev,
 			"Failed to add memory to VA gen pool for CB mapping\n");
-		goto err_pool_destroy;
+		goto err_unreserve_va_block;
 	}
 
 	return 0;
 
+err_unreserve_va_block:
+	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
 err_pool_destroy:
 	gen_pool_destroy(ctx->cb_va_pool);
@@ -590,4 +599,5 @@ void hl_cb_va_pool_fini(struct hl_ctx *ctx)
 		return;
 
 	gen_pool_destroy(ctx->cb_va_pool);
+	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
 }


@@ -567,10 +567,6 @@ struct hl_hints_range {
  * @tpc_binning_mask: which TPCs are binned. 0 means usable and 1 means binned.
  * @dram_enabled_mask: which DRAMs are enabled.
  * @dram_binning_mask: which DRAMs are binned. 0 means usable, 1 means binned.
- * @cb_va_start_addr: virtual start address of command buffers which are mapped
- *                    to the device's MMU.
- * @cb_va_end_addr: virtual end address of command buffers which are mapped to
- *                  the device's MMU.
  * @dram_hints_align_mask: dram va hint addresses alignment mask which is used
  *                         for hints validity check.
  * @cfg_base_address: config space base address.
@@ -713,8 +709,6 @@ struct asic_fixed_properties {
 	u64				tpc_binning_mask;
 	u64				dram_enabled_mask;
 	u64				dram_binning_mask;
-	u64				cb_va_start_addr;
-	u64				cb_va_end_addr;
 	u64				dram_hints_align_mask;
 	u64				cfg_base_address;
 	u64				mmu_cache_mng_addr;
@@ -1803,6 +1797,7 @@ struct hl_cs_outcome_store {
  * @cb_va_pool: device VA pool for command buffers which are mapped to the
  *              device's MMU.
  * @sig_mgr: encaps signals handle manager.
+ * @cb_va_pool_base: the base address for the device VA pool
  * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
  *               to user so user could inquire about CS. It is used as
  *               index to cs_pending array.
@@ -1838,6 +1833,7 @@ struct hl_ctx {
 	struct hl_cs_counters_atomic	cs_counters;
 	struct gen_pool			*cb_va_pool;
 	struct hl_encaps_signals_mgr	sig_mgr;
+	u64				cb_va_pool_base;
 	u64				cs_sequence;
 	u64				*dram_default_hops;
 	spinlock_t			cs_lock;
@@ -3600,7 +3596,7 @@ void hl_hw_block_mem_init(struct hl_ctx *ctx);
 void hl_hw_block_mem_fini(struct hl_ctx *ctx);
 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
-		enum hl_va_range_type type, u32 size, u32 alignment);
+		enum hl_va_range_type type, u64 size, u32 alignment);
 int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
 		u64 start_addr, u64 size);
 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,


@@ -755,7 +755,7 @@ out:
  * - Return the start address of the virtual block.
  */
 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
-		enum hl_va_range_type type, u32 size, u32 alignment)
+		enum hl_va_range_type type, u64 size, u32 alignment)
 {
 	return get_va_block(hdev, ctx->va_range[type], size, 0,
 			max(alignment, ctx->va_range[type]->page_size),


@@ -2022,9 +2022,6 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
 	prop->server_type = HL_SERVER_TYPE_UNKNOWN;
 
-	prop->cb_va_start_addr = VA_HOST_SPACE_USER_MAPPED_CB_START;
-	prop->cb_va_end_addr = VA_HOST_SPACE_USER_MAPPED_CB_END;
-
 	prop->max_dec = NUMBER_OF_DEC;
 
 	prop->clk_pll_index = HL_GAUDI2_MME_PLL;


@@ -139,9 +139,6 @@
 #define VA_HOST_SPACE_HPAGE_START	0xFFF0800000000000ull
 #define VA_HOST_SPACE_HPAGE_END		0xFFF1000000000000ull /* 140TB */
 
-#define VA_HOST_SPACE_USER_MAPPED_CB_START	0xFFF1000000000000ull
-#define VA_HOST_SPACE_USER_MAPPED_CB_END	0xFFF1000100000000ull /* 4GB */
-
 /* 140TB */
 #define VA_HOST_SPACE_PAGE_SIZE		(VA_HOST_SPACE_PAGE_END - VA_HOST_SPACE_PAGE_START)