drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select the next entity from a run queue, using a round-robin policy.
 * May return the same entity as the current one if it is the only
 * available entity in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

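        /* Resume the scan just after the entity picked last time, so every
         * entity with pending jobs gets a fair turn. */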
        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (!kfifo_is_empty(&entity->job_queue)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return rq->current_entity;
                        }
                }
        }

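        /* Nothing runnable after the current entity: wrap around and scan
         * from the head, stopping once we are back at the starting point. */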
        list_for_each_entry(entity, &rq->entities, list) {
                if (!kfifo_is_empty(&entity->job_queue)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return rq->current_entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched       The pointer to the scheduler
 * @entity      The pointer to a valid amd_sched_entity
 * @rq          The run queue this entity belongs to
 * @jobs        The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        entity->belongto_rq = rq;
        entity->scheduler = sched;
        init_waitqueue_head(&entity->wait_queue);
        entity->fence_context = fence_context_alloc(1);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        spin_lock_init(&entity->queue_lock);
        atomic_set(&entity->fence_seq, 0);

        /* Add the entity to the run queue */
        amd_sched_rq_add_entity(rq, entity);
        return 0;
}

/**
 * Query if the entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if the entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->scheduler == sched &&
                entity->belongto_rq != NULL;
}

/**
 * Check if the entity is idle
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
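        /* Make sure job_queue updates done by the submitting thread are
         * visible before testing for emptiness. */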
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Return the remaining timeout in jiffies if the entity went idle,
 * 0 if waiting for the queued jobs timed out.
 */
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->belongto_rq;
        long r;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return 0;

        /*
         * The client will not queue more IBs during this fini; consume the
         * existing queued IBs.
         */
        r = wait_event_timeout(entity->wait_queue,
                amd_sched_entity_is_idle(entity),
                msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS));

        if (r <= 0)
                DRM_INFO("Entity %p is in waiting state during fini\n",
                         entity);

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
        return r;
}

/**
 * Helper to submit a job to the job queue
 *
 * @job         The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *job)
{
        struct amd_sched_entity *entity = job->s_entity;
        bool added, first = false;

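        /* kfifo has no internal producer locking; queue_lock serializes
         * concurrent submitters pushing into the same entity. */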
        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first)
                wake_up_interruptible(&job->sched->wait_queue);

        return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job   The pointer to the job to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;
        struct amd_sched_fence *fence = amd_sched_fence_create(
                entity, sched_job->owner);
        int r;

        if (!fence)
                return -ENOMEM;

        fence_get(&fence->base);
        sched_job->s_fence = fence;

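        /* Block until the job fits into the entity's kfifo;
         * amd_sched_entity_in() re-checks on every wakeup. */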
        r = wait_event_interruptible(entity->wait_queue,
                                     amd_sched_entity_in(sched_job));

        return r;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Select the next entity containing real IB submissions
 */
static struct amd_sched_entity *
amd_sched_select_context(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *tmp;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
        if (tmp == NULL)
                tmp = amd_sched_rq_select_entity(&sched->sched_rq);

        return tmp;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_job *sched_job =
                container_of(cb, struct amd_sched_job, cb);
        struct amd_gpu_scheduler *sched;

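        /* Called from the fence signalling path: report completion, release
         * the hw submission slot and kick the scheduler thread. */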
        sched = sched_job->sched;
        amd_sched_fence_signal(sched_job->s_fence);
        atomic_dec(&sched->hw_rq_count);
        fence_put(&sched_job->s_fence->base);
        sched->ops->process_job(sched, sched_job);
        wake_up_interruptible(&sched->wait_queue);
}

static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = param;
        int r;

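        /* Run the scheduler thread with RT priority so command submission
         * is not starved by regular kernel threads. */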
        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *c_entity = NULL;
                struct amd_sched_job *job;
                struct fence *fence;

                wait_event_interruptible(sched->wait_queue,
                        kthread_should_stop() ||
                        (c_entity = amd_sched_select_context(sched)));

                if (!c_entity)
                        continue;

                r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
                if (r != sizeof(void *))
                        continue;
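                /* Account for the submission before running the job;
                 * amd_sched_process_job() drops the count on completion. */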
                atomic_inc(&sched->hw_rq_count);

                fence = sched->ops->run_job(sched, c_entity, job);
                if (fence) {
                        r = fence_add_callback(fence, &job->cb,
                                               amd_sched_process_job);
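                        /* -ENOENT means the fence already signaled, so run
                         * the completion handler directly. */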
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &job->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n", r);
                        fence_put(fence);
                }

                wake_up(&c_entity->wait_queue);
        }
        return 0;
}

/**
 * Create a gpu scheduler
 *
 * @ops                 The backend operations for this scheduler.
 * @ring                The ring id for the scheduler.
 * @hw_submission       Number of hw submissions to do.
 *
 * Return the pointer to the scheduler on success, otherwise NULL.
 */
struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
                                           unsigned ring, unsigned hw_submission)
{
        struct amd_gpu_scheduler *sched;

        sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
        if (!sched)
                return NULL;

        sched->ops = ops;
        sched->ring_id = ring;
        sched->hw_submission_limit = hw_submission;
        snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
        amd_sched_rq_init(&sched->sched_rq);
        amd_sched_rq_init(&sched->kernel_rq);

        init_waitqueue_head(&sched->wait_queue);
        atomic_set(&sched->hw_rq_count, 0);
        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
                kfree(sched);
                return NULL;
        }

        return sched;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched       The pointer to the scheduler
 *
 * Return 0 on success.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
        kthread_stop(sched->thread);
        kfree(sched);
        return 0;
}