/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS

/**
 * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
 *
 * Because a deadline hint can be set before the backing hw fence is created,
 * we need to keep track of whether a deadline has already been set.
 */
#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT	(DMA_FENCE_FLAG_USER_BITS + 1)

enum dma_resv_usage;
struct dma_resv;
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

struct drm_file;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_LOW,

	DRM_SCHED_PRIORITY_COUNT
};

/* Used to choose between FIFO and RR job scheduling */
extern int drm_sched_policy;

#define DRM_SCHED_POLICY_RR    0
#define DRM_SCHED_POLICY_FIFO  1

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head		list;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @rq_lock, but readers are generally lockless and seem to just race
	 * with not even a READ_ONCE.
	 */
	struct drm_sched_rq		*rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler). Jobs from this
	 * entity can be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for
	 * more details.
	 *
	 * This will be set to NULL if &num_sched_list equals 1 and @rq has
	 * been set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this
	 * case.
	 */
	struct drm_gpu_scheduler	**sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int			num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by &rq_lock.
	 */
	enum drm_sched_priority		priority;

	/**
	 * @rq_lock:
	 *
	 * Lock to modify the runqueue to which this entity belongs.
	 */
	spinlock_t			rq_lock;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue		job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct
	 * locking, this doesn't need to be atomic.
	 */
	atomic_t			fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity.
	 * The &drm_sched_fence.scheduled uses the fence_context but
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t			fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is on the top of the job
	 * queue.
	 */
	struct dma_fence		*dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb		cb;

	/**
	 * @guilty:
	 *
	 * Points to the entity's guilty flag, provided by the driver at
	 * entity init time and set once a job from this entity exceeded the
	 * scheduler's hang limit.
	 */
	atomic_t			*guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only
	 * written by the scheduler thread, can be accessed locklessly from
	 * drm_sched_job_arm() iff the queue is empty.
	 */
	struct dma_fence __rcu		*last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct		*last_user;

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from the rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool				stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when the entity is not in use, used to sequence entity
	 * cleanup in drm_sched_entity_fini().
	 */
	struct completion		entity_idle;

	/**
	 * @oldest_job_waiting:
	 *
	 * Marks the earliest job waiting in the SW queue.
	 */
	ktime_t				oldest_job_waiting;

	/**
	 * @rb_tree_node:
	 *
	 * The node used to insert this entity into the time-based priority
	 * queue.
	 */
	struct rb_node			rb_tree_node;
};

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 * @rb_tree_root: root of the time-based priority queue of entities for FIFO
 *                scheduling
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
	struct rb_root_cached		rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @deadline: deadline set on &drm_sched_fence.finished which
	 * potentially needs to be propagated to &drm_sched_fence.parent
	 */
	ktime_t				deadline;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;

	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;

	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;

	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};
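
/*
 * Illustrative use of &drm_sched_fence.finished as a submission's out-fence.
 * This is a driver-side sketch, not part of this header; "job" is assumed to
 * be a hypothetical driver job embedding a struct drm_sched_job named "base":
 *
 *	struct dma_fence *out_fence = &job->base.s_fence->finished;
 *
 *	dma_fence_get(out_fence);
 *	(hand out_fence to userspace, e.g. through a sync_file or syncobj)
 *
 * As documented above, @finished is available right after
 * drm_sched_job_init(), unlike the hardware fence returned from run_job().
 */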

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @credits: the number of credits this job contributes to the scheduler
 * @work: Helper to reschedule the job kill to a different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct list_head		list;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;

	u32				credits;

	/*
	 * work is used only after finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb	finish_cb;
		struct work_struct	work;
	};

	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;

	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray			dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long			last_dependency;

	/**
	 * @submit_ts:
	 *
	 * When the job was pushed into the entity queue.
	 */
	ktime_t				submit_ts;
};
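
/*
 * Illustrative submission flow (a driver-side sketch; "job" is assumed to be
 * a hypothetical driver job embedding a struct drm_sched_job named "base"):
 *
 *	ret = drm_sched_job_init(&job->base, entity, 1, file_priv);
 *	if (ret)
 *		return ret;
 *
 *	(add dependencies here, see drm_sched_job_add_dependency() below)
 *
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 *
 * drm_sched_job_arm() is the point of no return for the submission; if the
 * driver bails out before that, drm_sched_job_cleanup() can still be used to
 * tear the job down.
 */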

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops - Define the backend operations
 *	called by the scheduler
 *
 * These functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @prepare_job:
	 *
	 * Called when the scheduler is considering scheduling this job next,
	 * to get another struct dma_fence for this job to block on. Once it
	 * returns NULL, run_job() may be called.
	 *
	 * Can be NULL if no additional preparation of the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
	 */
	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
					 struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows the following workflow:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue
	 * 2. Try to gracefully stop non-faulty jobs (optional)
	 * 3. Issue a GPU reset (driver-specific)
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs()
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler. One way to
	 * achieve this synchronization is to create an ordered workqueue
	 * (using alloc_ordered_workqueue()) at the driver level, and pass this
	 * queue to drm_sched_init(), to guarantee that timeout handlers are
	 * executed sequentially. The above workflow needs to be slightly
	 * adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional)
	 * 3. Issue a GPU reset on all faulty queues (driver-specific)
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs()
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start()
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
	 * and the underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);

	/**
	 * @update_job_credits: Called when the scheduler is considering this
	 * job for execution.
	 *
	 * This callback returns the number of credits the job would take if
	 * pushed to the hardware. Drivers may use this to dynamically update
	 * the job's credit count. For instance, deduct the number of credits
	 * for already signalled native fences.
	 *
	 * This callback is optional.
	 */
	u32 (*update_job_credits)(struct drm_sched_job *sched_job);
};
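
/*
 * Illustrative driver-side sketch (hypothetical "foo" driver; none of the
 * foo_* symbols exist in this header): the mandatory callbacks wired into a
 * &struct drm_sched_backend_ops, with a timeout handler following the
 * single-scheduler recovery workflow documented for @timedout_job above.
 *
 *	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *	{
 *		(push the job's commands to the ring, return the hw fence)
 *		return foo_ring_submit(to_foo_job(sched_job));
 *	}
 *
 *	static enum drm_gpu_sched_stat
 *	foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *		drm_sched_stop(sched, sched_job);
 *		foo_gpu_reset(sched->dev);
 *		drm_sched_resubmit_jobs(sched);
 *		drm_sched_start(sched);
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static void foo_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(to_foo_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_timedout_job,
 *		.free_job	= foo_free_job,
 *	};
 */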

/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @credit_limit: the credit limit of this scheduler
 * @credit_count: the current credit count of this scheduler
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
 *           as there's usually one run-queue per priority, but could be less.
 * @sched_rq: An allocated array of run-queues of size @num_rqs;
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @job_id_count: used to assign a unique id to each job.
 * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_run_job: work which calls run_job op of each scheduler.
 * @work_free_job: work which calls free_job op of each scheduler.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the hangs by a job crosses this limit then it is marked
 *              guilty and it will no longer be considered for scheduling.
 * @score: score to help the load balancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @pause_submit: pause queuing of @work_run_job on @submit_wq
 * @own_submit_wq: scheduler owns allocation of @submit_wq
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	u32				credit_limit;
	atomic_t			credit_count;
	long				timeout;
	const char			*name;
	u32				num_rqs;
	struct drm_sched_rq		**sched_rq;
	wait_queue_head_t		job_scheduled;
	atomic64_t			job_id_count;
	struct workqueue_struct		*submit_wq;
	struct workqueue_struct		*timeout_wq;
	struct work_struct		work_run_job;
	struct work_struct		work_free_job;
	struct delayed_work		work_tdr;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			*score;
	atomic_t			_score;
	bool				ready;
	bool				free_guilty;
	bool				pause_submit;
	bool				own_submit_wq;
	struct device			*dev;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   struct workqueue_struct *submit_wq,
		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);
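
/*
 * Illustrative initialization (a sketch; the "foo" names are hypothetical).
 * Passing NULL for @submit_wq lets the scheduler allocate and own its submit
 * workqueue (see @own_submit_wq), and a NULL @score makes it fall back to the
 * internal @_score:
 *
 *	ret = drm_sched_init(&foo->sched, &foo_sched_ops, NULL,
 *			     DRM_SCHED_PRIORITY_COUNT, credit_limit,
 *			     hang_limit, msecs_to_jiffies(timeout_ms),
 *			     NULL, NULL, "foo_ring", foo->dev);
 */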

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       u32 credits, void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);
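
/*
 * Dependencies are attached after drm_sched_job_init() and before
 * drm_sched_job_arm(). A minimal sketch (the driver variables in_fence,
 * gem_obj and job->base are hypothetical):
 *
 *	ret = drm_sched_job_add_dependency(&job->base, in_fence);
 *	if (!ret)
 *		ret = drm_sched_job_add_implicit_dependencies(&job->base,
 *							      gem_obj, true);
 *
 * The scheduler resolves all collected dependencies before run_job() is
 * called for the job.
 */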

void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity);
bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
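
/*
 * Illustrative entity setup (a sketch; a single-scheduler @sched_list is the
 * common case, and the "foo" names are hypothetical):
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &foo->sched };
 *
 *	ret = drm_sched_entity_init(&foo_ctx->entity,
 *				    DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list),
 *				    NULL);
 *
 * A NULL @guilty is fine when the driver does not track per-context hangs;
 * the entity is torn down again with drm_sched_entity_destroy().
 */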
|
2018-07-20 12:21:05 +00:00
|
|
|
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
|
|
|
|
void drm_sched_entity_fini(struct drm_sched_entity *entity);
|
|
|
|
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
|
2018-08-06 12:25:32 +00:00
|
|
|
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
|
|
|
|
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
|
2021-08-05 10:46:50 +00:00
|
|
|
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
int drm_sched_entity_error(struct drm_sched_entity *entity);
struct drm_sched_fence *drm_sched_fence_alloc(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
			       struct dma_fence *parent);
void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);
#endif