forked from Minki/linux
habanalabs: get multiple fences under same cs_lock
To add proper support for wait-for-multi-CS, locking the CS lock for each CS fence in the list is not efficient. Instead, this patch adds support for locking the CS lock once to get all required fences. Signed-off-by: Ohad Sharabi <osharabi@habana.ai> Reviewed-by: Oded Gabbay <ogabbay@kernel.org> Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
This commit is contained in:
parent
a6cd2551d7
commit
c457d5abf8
@ -152,8 +152,17 @@ free:
|
||||
|
||||
void hl_fence_put(struct hl_fence *fence)
|
||||
{
|
||||
if (fence)
|
||||
kref_put(&fence->refcount, hl_fence_release);
|
||||
if (IS_ERR_OR_NULL(fence))
|
||||
return;
|
||||
kref_put(&fence->refcount, hl_fence_release);
|
||||
}
|
||||
|
||||
/*
 * hl_fences_put - drop one reference on each fence in an array.
 * @fence: array of fence pointers; individual entries may be NULL/ERR_PTR
 *         (hl_fence_put() handles those).
 * @len:   number of entries in the array.
 */
void hl_fences_put(struct hl_fence **fence, int len)
{
	int i;

	for (i = 0; i < len; i++, fence++)
		hl_fence_put(*fence);
}
|
||||
void hl_fence_get(struct hl_fence *fence)
|
||||
@ -1896,61 +1905,76 @@ out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
 * hl_wait_for_fence - wait for a single CS fence to signal.
 * @ctx:        context the CS belongs to (used for hdev and diagnostics).
 * @seq:        CS sequence number, used only for log messages.
 * @fence:      fence to wait on, as returned by hl_ctx_get_fence() - may be
 *              NULL (fence already retired) or an ERR_PTR.
 * @status:     out - COMPLETED, BUSY or GONE.
 * @timeout_us: 0 polls completion_done(); MAX_SCHEDULE_TIMEOUT waits forever;
 *              anything else is converted to jiffies.
 * @timestamp:  optional out - fence completion time in ns (only written on
 *              completion; caller is expected to pre-zero it).
 *
 * Does NOT drop the fence reference - the caller owns it and must call
 * hl_fence_put() itself.
 *
 * Return: 0 on normal flow (including "gone" and "busy"), PTR_ERR of @fence
 * if it is an ERR_PTR, or the fence's error (-ETIMEDOUT / -EIO).
 */
static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
				enum hl_cs_wait_status *status, u64 timeout_us,
				s64 *timestamp)
{
	struct hl_device *hdev = ctx->hdev;
	long completion_rc;
	int rc = 0;

	if (IS_ERR(fence)) {
		rc = PTR_ERR(fence);
		if (rc == -EINVAL)
			dev_notice_ratelimited(hdev->dev,
				"Can't wait on CS %llu because current CS is at seq %llu\n",
				seq, ctx->cs_sequence);
		return rc;
	}

	if (!fence) {
		/* Fence slot already recycled - the CS finished long ago. */
		dev_dbg(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
			seq, ctx->cs_sequence);

		*status = CS_WAIT_STATUS_GONE;
		return 0;
	}

	if (!timeout_us) {
		/* Zero timeout means "just poll the completion state". */
		completion_rc = completion_done(&fence->completion);
	} else {
		unsigned long timeout;

		timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
				timeout_us : usecs_to_jiffies(timeout_us);
		completion_rc =
			wait_for_completion_interruptible_timeout(
				&fence->completion, timeout);
	}

	if (completion_rc > 0) {
		*status = CS_WAIT_STATUS_COMPLETED;
		if (timestamp)
			*timestamp = ktime_to_ns(fence->timestamp);
	} else {
		*status = CS_WAIT_STATUS_BUSY;
	}

	/* Propagate an error the CS itself recorded, even if it completed. */
	if (fence->error == -ETIMEDOUT)
		rc = -ETIMEDOUT;
	else if (fence->error == -EIO)
		rc = -EIO;

	return rc;
}
|
||||
/*
 * _hl_cs_wait_ioctl - wait for a single CS by sequence number.
 * @hdev:       habanalabs device.
 * @ctx:        context owning the CS.
 * @timeout_us: wait budget in microseconds (see hl_wait_for_fence()).
 * @seq:        CS sequence number to wait on.
 * @status:     out - wait result.
 * @timestamp:  optional out - completion time in ns; zeroed up front.
 *
 * Takes a context reference for the duration of the wait. The fence
 * reference obtained here is released unconditionally; hl_fence_put()
 * tolerates the NULL/ERR_PTR values hl_ctx_get_fence() may return.
 *
 * Return: 0 on success or a negative error from hl_wait_for_fence().
 */
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
				u64 timeout_us, u64 seq,
				enum hl_cs_wait_status *status, s64 *timestamp)
{
	struct hl_fence *fence;
	int rc = 0;

	if (timestamp)
		*timestamp = 0;

	hl_ctx_get(hdev, ctx);

	fence = hl_ctx_get_fence(ctx, seq);

	rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
	hl_fence_put(fence);
	hl_ctx_put(ctx);

	return rc;
}
@ -229,31 +229,66 @@ int hl_ctx_put(struct hl_ctx *ctx)
|
||||
return kref_put(&ctx->refcount, hl_ctx_do_release);
|
||||
}
|
||||
|
||||
/* this function shall be called with cs_lock locked */
static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
	struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
	struct hl_fence *fence;

	/* Sequence not submitted yet - invalid to wait on it. */
	if (seq >= ctx->cs_sequence)
		return ERR_PTR(-EINVAL);

	/* Slot already recycled - the CS completed long ago ("gone"). */
	if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
		return NULL;

	/* max_pending_cs is a power of two, so & is a cheap modulo. */
	fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
	hl_fence_get(fence);
	return fence;
}
|
||||
/*
 * hl_ctx_get_fence - get a referenced fence for a single CS sequence.
 * @ctx: context owning the CS.
 * @seq: CS sequence number.
 *
 * Locking wrapper around hl_ctx_get_fence_locked(); takes cs_lock for the
 * single lookup.
 *
 * Return: referenced fence, NULL if the CS already retired, or
 * ERR_PTR(-EINVAL) for a not-yet-submitted sequence. Caller must release
 * the reference with hl_fence_put().
 */
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_fence *fence;

	spin_lock(&ctx->cs_lock);

	fence = hl_ctx_get_fence_locked(ctx, seq);

	spin_unlock(&ctx->cs_lock);

	return fence;
}
|
||||
/*
 * hl_ctx_get_fences - get referenced fences for multiple CS sequences.
 * @ctx:     context owning the command submissions.
 * @seq_arr: array of CS sequence numbers to look up.
 * @fence:   out - array (at least @arr_len entries) filled with fence
 *           pointers; entries may be NULL for already-retired CSs.
 * @arr_len: number of sequences to look up.
 *
 * Takes cs_lock once for the whole batch instead of once per fence, which
 * is the point of the multi-CS wait support. On failure, references taken
 * so far are dropped before returning.
 *
 * Return: 0 on success, negative error if any lookup failed.
 */
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
				struct hl_fence **fence, u32 arr_len)
{
	struct hl_fence **fence_arr_base = fence;
	int i, rc = 0;

	spin_lock(&ctx->cs_lock);

	for (i = 0; i < arr_len; i++, fence++) {
		u64 seq = seq_arr[i];

		*fence = hl_ctx_get_fence_locked(ctx, seq);

		if (IS_ERR(*fence)) {
			dev_err(ctx->hdev->dev,
				"Failed to get fence for CS with seq 0x%llx\n",
					seq);
			rc = PTR_ERR(*fence);
			break;
		}
	}

	spin_unlock(&ctx->cs_lock);

	/* Unwind: release the i fences successfully obtained before the error. */
	if (rc)
		hl_fences_put(fence_arr_base, i);

	return rc;
}
|
||||
/*
|
||||
* hl_ctx_mgr_init - initialize the context manager
|
||||
*
|
||||
|
@ -2645,6 +2645,8 @@ void hl_ctx_do_release(struct kref *ref);
|
||||
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
|
||||
int hl_ctx_put(struct hl_ctx *ctx);
|
||||
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
|
||||
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
|
||||
struct hl_fence **fence, u32 arr_len);
|
||||
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
|
||||
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
|
||||
|
||||
@ -2692,6 +2694,7 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
|
||||
void hl_sob_reset_error(struct kref *ref);
|
||||
int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
|
||||
void hl_fence_put(struct hl_fence *fence);
|
||||
void hl_fences_put(struct hl_fence **fence, int len);
|
||||
void hl_fence_get(struct hl_fence *fence);
|
||||
void cs_get(struct hl_cs *cs);
|
||||
bool cs_needs_completion(struct hl_cs *cs);
|
||||
|
Loading…
Reference in New Issue
Block a user