/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _GPU_SCHEDULER_H_
#define _GPU_SCHEDULER_H_

#include <linux/kfifo.h>
#include <linux/fence.h>

#define AMD_SCHED_FENCE_SCHEDULED_BIT	FENCE_FLAG_USER_BITS

struct amd_gpu_scheduler;
struct amd_sched_rq;

extern struct kmem_cache *sched_fence_slab;
extern atomic_t sched_fence_slab_ref;

/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to the corresponding hardware ring, based on the
 * scheduling policy. An illustrative usage sketch follows the
 * structure definition below.
 */
struct amd_sched_entity {
	struct list_head list;
	struct amd_sched_rq *rq;
	struct amd_gpu_scheduler *sched;

	spinlock_t queue_lock;
	struct kfifo job_queue;

	atomic_t fence_seq;
	uint64_t fence_context;

	struct fence *dependency;
	struct fence_cb cb;
};
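
/*
 * Illustrative sketch only (not taken from the upstream documentation):
 * a driver would typically create one entity per context and ring, push
 * jobs through it, and tear it down when the context goes away. The
 * "ring" variable and the queue depth of 32 below are hypothetical.
 *
 *	struct amd_sched_entity entity;
 *	struct amd_sched_rq *rq =
 *		&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
 *	int r;
 *
 *	r = amd_sched_entity_init(&ring->sched, &entity, rq, 32);
 *	if (r)
 *		return r;
 *	...
 *	amd_sched_entity_fini(&ring->sched, &entity);
 */
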
/**
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from; a rough sketch of such a
 * policy follows the structure definition below.
 */
struct amd_sched_rq {
	spinlock_t lock;
	struct list_head entities;
	struct amd_sched_entity *current_entity;
};
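
/*
 * Rough, simplified sketch of what a selection policy over this structure
 * could look like (an assumption for illustration only; the real policy
 * lives in the scheduler core and additionally resumes the scan after
 * "current_entity" so that entities take turns). "entity_is_ready" is a
 * hypothetical helper.
 *
 *	struct amd_sched_entity *entity;
 *
 *	spin_lock(&rq->lock);
 *	list_for_each_entry(entity, &rq->entities, list) {
 *		if (entity_is_ready(entity)) {
 *			rq->current_entity = entity;
 *			spin_unlock(&rq->lock);
 *			return entity;
 *		}
 *	}
 *	spin_unlock(&rq->lock);
 *	return NULL;
 */
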
struct amd_sched_fence {
	struct fence base;
	struct list_head scheduled_cb;
	struct amd_gpu_scheduler *sched;
	spinlock_t lock;
	void *owner;
	struct amd_sched_job *s_job;
};

struct amd_sched_job {
	struct kref refcount;
	struct amd_gpu_scheduler *sched;
	struct amd_sched_entity *s_entity;
	struct amd_sched_fence *s_fence;
	bool use_sched;	/* true if the job goes to the scheduler */
	struct fence_cb cb_free_job;
	struct work_struct work_free_job;
	struct list_head node;
	struct delayed_work work_tdr;
	void (*timeout_callback)(struct work_struct *work);
	void (*free_callback)(struct kref *refcount);
};

extern const struct fence_ops amd_sched_fence_ops;
static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
{
	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
	if (__f->base.ops == &amd_sched_fence_ops)
		return __f;
	return NULL;
}

/**
 * Define the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 * An illustrative driver-side sketch follows the structure below.
 */
struct amd_sched_backend_ops {
	struct fence *(*dependency)(struct amd_sched_job *sched_job);
	struct fence *(*run_job)(struct amd_sched_job *sched_job);
	void (*begin_job)(struct amd_sched_job *sched_job);
	void (*finish_job)(struct amd_sched_job *sched_job);
};
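
/*
 * Illustrative sketch of a driver-side implementation (my_job_dependency,
 * my_job_run and my_ring_submit are hypothetical names, not part of this
 * API). One option for begin_job/finish_job is to plug in the generic
 * helpers declared further down in this header.
 *
 *	static struct fence *my_job_dependency(struct amd_sched_job *sched_job)
 *	{
 *		// return the next unsignaled fence this job still has to wait
 *		// for, or NULL once the job is ready to run
 *		return NULL;
 *	}
 *
 *	static struct fence *my_job_run(struct amd_sched_job *sched_job)
 *	{
 *		// push the command stream to the hardware ring and return the
 *		// hardware fence that signals its completion
 *		return my_ring_submit(sched_job);
 *	}
 *
 *	static const struct amd_sched_backend_ops my_sched_ops = {
 *		.dependency = my_job_dependency,
 *		.run_job = my_job_run,
 *		.begin_job = amd_sched_job_begin,
 *		.finish_job = amd_sched_job_finish,
 *	};
 */
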
enum amd_sched_priority {
	AMD_SCHED_PRIORITY_KERNEL = 0,
	AMD_SCHED_PRIORITY_NORMAL,
	AMD_SCHED_MAX_PRIORITY
};

/**
 * One scheduler is implemented for each hardware ring.
 * An illustrative init/fini sketch follows the structure below.
 */
struct amd_gpu_scheduler {
	const struct amd_sched_backend_ops *ops;
	uint32_t hw_submission_limit;
	long timeout;
	const char *name;
	struct amd_sched_rq sched_rq[AMD_SCHED_MAX_PRIORITY];
	wait_queue_head_t wake_up_worker;
	wait_queue_head_t job_scheduled;
	atomic_t hw_rq_count;
	struct task_struct *thread;
	struct list_head ring_mirror_list;
	spinlock_t job_list_lock;
};
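
/*
 * Illustrative sketch of bringing up one scheduler instance per hardware
 * ring, reusing the hypothetical my_sched_ops from the sketch above. The
 * submission depth of 2, the 5 second timeout and the "gfx" name are
 * made-up example values.
 *
 *	r = amd_sched_init(&ring->sched, &my_sched_ops,
 *			   2, msecs_to_jiffies(5000), "gfx");
 *	if (r)
 *		return r;
 *	...
 *	amd_sched_fini(&ring->sched);
 */
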
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   uint32_t hw_submission, long timeout, const char *name);
void amd_sched_fini(struct amd_gpu_scheduler *sched);

int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs);
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);

struct amd_sched_fence *amd_sched_fence_create(
	struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
void amd_sched_fence_signal(struct amd_sched_fence *fence);
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void (*timeout_cb)(struct work_struct *work),
		       void (*free_cb)(struct kref *refcount),
		       void *owner, struct fence **fence);
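
/*
 * Illustrative submission flow (sketch only; my_job, my_timeout_cb,
 * my_free_cb and owner are hypothetical driver-side names, and my_job is
 * assumed to embed its struct amd_sched_job as "base"):
 *
 *	struct fence *f;
 *
 *	r = amd_sched_job_init(&my_job->base, &ring->sched, &entity,
 *			       my_timeout_cb, my_free_cb, owner, &f);
 *	if (r)
 *		return r;
 *	// hand the job to the entity's queue; the scheduler thread picks it
 *	// up, resolves dependencies and eventually calls ops->run_job()
 *	amd_sched_entity_push_job(&my_job->base);
 *	// "f" is the scheduler fence the caller can wait on or install as a
 *	// dependency elsewhere
 */
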
void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched,
				struct amd_sched_job *s_job);
void amd_sched_job_finish(struct amd_sched_job *s_job);
void amd_sched_job_begin(struct amd_sched_job *s_job);

static inline void amd_sched_job_get(struct amd_sched_job *job) {
	if (job)
		kref_get(&job->refcount);
}

static inline void amd_sched_job_put(struct amd_sched_job *job) {
	if (job)
		kref_put(&job->refcount, job->free_callback);
}

#endif