habanalabs: Rename hw_queues_mirror to cs_mirror
Future command submission types might be submitted to HW not via the QMAN
queues path. However, it would still be required to have the TDR mechanism
for these CS, and thus the patch renames the TDR fields and replaces the
hw_queues_ prefix with cs_.

Signed-off-by: Tomer Tayar <ttayar@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
commit 804a72276c
parent 784b916dad
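The mechanism being renamed is the driver's software watchdog for command
submissions (TDR): in-flight CSs sit on a mirror list in submission order,
only the CS at the head of the list has an armed timeout work, and completing
a CS hands the timer over to the next pending one. Below is a minimal sketch
of that pattern, not the driver's actual code; the toy_dev/toy_cs types and
toy_* functions are illustrative stand-ins for hl_device/hl_cs, and the
timeout-disabled mode (MAX_SCHEDULE_TIMEOUT) and timed-out-CS handling seen
in the diff are elided here.

/*
 * Minimal sketch of the TDR pattern this rename preserves; toy_* names
 * are illustrative, not habanalabs driver code.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct toy_dev {
	struct list_head	cs_mirror_list;	/* in-flight CS, FIFO order */
	spinlock_t		cs_mirror_lock;
	unsigned long		timeout_jiffies;
};

struct toy_cs {
	struct list_head	mirror_node;
	struct delayed_work	work_tdr;	/* fires if the CS times out */
	bool			tdr_active;
};

/* Submission path: append the CS and arm TDR only if it is now the head */
static void toy_schedule_cs(struct toy_dev *hdev, struct toy_cs *cs)
{
	spin_lock(&hdev->cs_mirror_lock);
	list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);

	if (list_first_entry(&hdev->cs_mirror_list, struct toy_cs,
				mirror_node) == cs) {
		cs->tdr_active = true;
		schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
	}

	spin_unlock(&hdev->cs_mirror_lock);
}

/* Completion path: drop the CS and hand the TDR to the next pending CS */
static void toy_complete_cs(struct toy_dev *hdev, struct toy_cs *cs)
{
	struct toy_cs *next;

	spin_lock(&hdev->cs_mirror_lock);
	list_del_init(&cs->mirror_node);
	spin_unlock(&hdev->cs_mirror_lock);

	/* cancel_delayed_work_sync() can sleep; call it outside the lock */
	if (cs->tdr_active)
		cancel_delayed_work_sync(&cs->work_tdr);

	spin_lock(&hdev->cs_mirror_lock);
	next = list_first_entry_or_null(&hdev->cs_mirror_list,
					struct toy_cs, mirror_node);
	if (next && !next->tdr_active) {
		next->tdr_active = true;
		schedule_delayed_work(&next->work_tdr, hdev->timeout_jiffies);
	}
	spin_unlock(&hdev->cs_mirror_lock);
}

Because only the list head ever owns an armed work item, at most one TDR
timer runs per device, and the lock only guards list state, never the
sleeping cancel.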
drivers/misc/habanalabs/common/command_submission.c
@@ -343,25 +343,24 @@ static void cs_do_release(struct kref *ref)
 	/* Need to update CI for internal queues */
 	hl_int_hw_queue_update_ci(cs);
 
-	spin_lock(&hdev->hw_queues_mirror_lock);
-	/* remove CS from hw_queues mirror list */
+	/* remove CS from CS mirror list */
+	spin_lock(&hdev->cs_mirror_lock);
 	list_del_init(&cs->mirror_node);
-	spin_unlock(&hdev->hw_queues_mirror_lock);
+	spin_unlock(&hdev->cs_mirror_lock);
 
 	/* Don't cancel TDR in case this CS was timedout because we might be
 	 * running from the TDR context
 	 */
-	if (!cs->timedout &&
-			hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
+	if (!cs->timedout && hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
 		struct hl_cs *next;
 
 		if (cs->tdr_active)
 			cancel_delayed_work_sync(&cs->work_tdr);
 
-		spin_lock(&hdev->hw_queues_mirror_lock);
+		spin_lock(&hdev->cs_mirror_lock);
 
 		/* queue TDR for next CS */
-		next = list_first_entry_or_null(&hdev->hw_queues_mirror_list,
-				struct hl_cs, mirror_node);
+		next = list_first_entry_or_null(&hdev->cs_mirror_list,
+					struct hl_cs, mirror_node);
 
 		if (next && !next->tdr_active) {
@@ -370,7 +369,7 @@ static void cs_do_release(struct kref *ref)
 					hdev->timeout_jiffies);
 		}
 
-		spin_unlock(&hdev->hw_queues_mirror_lock);
+		spin_unlock(&hdev->cs_mirror_lock);
 	}
 
 out:
@@ -534,8 +533,7 @@ void hl_cs_rollback_all(struct hl_device *hdev)
 		flush_workqueue(hdev->cq_wq[i]);
 
 	/* Make sure we don't have leftovers in the H/W queues mirror list */
-	list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
-				mirror_node) {
+	list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
 		cs_get(cs);
 		cs->aborted = true;
 		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
drivers/misc/habanalabs/common/device.c
@@ -343,8 +343,8 @@ static int device_early_init(struct hl_device *hdev)
 	mutex_init(&hdev->send_cpu_message_lock);
 	mutex_init(&hdev->debug_lock);
 	mutex_init(&hdev->mmu_cache_lock);
-	INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
-	spin_lock_init(&hdev->hw_queues_mirror_lock);
+	INIT_LIST_HEAD(&hdev->cs_mirror_list);
+	spin_lock_init(&hdev->cs_mirror_lock);
 	INIT_LIST_HEAD(&hdev->fpriv_list);
 	mutex_init(&hdev->fpriv_list_lock);
 	atomic_set(&hdev->in_reset, 0);
drivers/misc/habanalabs/common/habanalabs.h
@@ -1699,8 +1699,8 @@ struct hl_mmu_funcs {
  * @eq_wq: work queue of event queue for executing work in process context.
  * @kernel_ctx: Kernel driver context structure.
  * @kernel_queues: array of hl_hw_queue.
- * @hw_queues_mirror_list: CS mirror list for TDR.
- * @hw_queues_mirror_lock: protects hw_queues_mirror_list.
+ * @cs_mirror_list: CS mirror list for TDR.
+ * @cs_mirror_lock: protects cs_mirror_list.
  * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs.
  * @event_queue: event queue for IRQ from CPU-CP.
  * @dma_pool: DMA pool for small allocations.
@@ -1810,8 +1810,8 @@ struct hl_device {
 	struct workqueue_struct		*eq_wq;
 	struct hl_ctx			*kernel_ctx;
 	struct hl_hw_queue		*kernel_queues;
-	struct list_head		hw_queues_mirror_list;
-	spinlock_t			hw_queues_mirror_lock;
+	struct list_head		cs_mirror_list;
+	spinlock_t			cs_mirror_lock;
 	struct hl_cb_mgr		kernel_cb_mgr;
 	struct hl_eq			event_queue;
 	struct dma_pool			*dma_pool;
drivers/misc/habanalabs/common/hw_queue.c
@@ -578,20 +578,20 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
 	else if (cs->type == CS_TYPE_COLLECTIVE_WAIT)
 		hdev->asic_funcs->collective_wait_init_cs(cs);
 
-	spin_lock(&hdev->hw_queues_mirror_lock);
-	list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);
+	spin_lock(&hdev->cs_mirror_lock);
+	list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);
 
 	/* Queue TDR if the CS is the first entry and if timeout is wanted */
 	if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
-			(list_first_entry(&hdev->hw_queues_mirror_list,
+			(list_first_entry(&hdev->cs_mirror_list,
 					struct hl_cs, mirror_node) == cs)) {
 		cs->tdr_active = true;
 		schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
-		spin_unlock(&hdev->hw_queues_mirror_lock);
-	} else {
-		spin_unlock(&hdev->hw_queues_mirror_lock);
 	}
 
+	spin_unlock(&hdev->cs_mirror_lock);
+
 	if (!hdev->cs_active_cnt++) {
 		struct hl_device_idle_busy_ts *ts;