/* drivers/gpu/drm/i915/intel_breadcrumbs.c */
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"

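/* Timer callback armed whenever we enable the user interrupt. If it
 * fires before the deadline in b->timeout has passed, it simply rearms
 * itself; otherwise we assume the interrupt was missed, flag the engine
 * in missed_irq_rings and fall back to polling via the fake-irq timer.
 */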
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        if (!b->irq_enabled)
                return;

        if (time_before(jiffies, b->timeout)) {
                mod_timer(&b->hangcheck, b->timeout);
                return;
        }

        DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
        set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
        mod_timer(&b->fake_irq, jiffies + 1);

        /* Ensure that even if the GPU hangs, we get woken up.
         *
         * However, note that if no one is waiting, we never notice
         * a GPU hang. Eventually, we will have to wait for a resource
         * held by the GPU and so trigger a hangcheck. In the most
         * pathological case, this will be upon memory starvation! To
         * prevent this, we also queue the hangcheck from the retire
         * worker.
         */
        i915_queue_hangcheck(engine->i915);
}

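/* Deadline for the hangcheck timer: one hangcheck interval from now. */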
static unsigned long wait_timeout(void)
{
        return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

static void intel_breadcrumbs_fake_irq(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

        /*
         * The timer persists in case we cannot enable interrupts,
         * or if we have previously seen seqno/interrupt incoherency
         * ("missed interrupt" syndrome). Here the worker will wake up
         * every jiffie in order to kick the oldest waiter to do the
         * coherent seqno check.
         */
        if (intel_engine_wakeup(engine))
                mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}

static void irq_enable(struct intel_engine_cs *engine)
{
        /* Enabling the IRQ may miss the generation of the interrupt, but
         * we still need to force the barrier before reading the seqno,
         * just in case.
         */
        engine->breadcrumbs.irq_posted = true;

        spin_lock_irq(&engine->i915->irq_lock);
        engine->irq_enable(engine);
        spin_unlock_irq(&engine->i915->irq_lock);
}

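/* Mask the user interrupt again and mark it as no longer posted. */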
static void irq_disable(struct intel_engine_cs *engine)
{
        spin_lock_irq(&engine->i915->irq_lock);
        engine->irq_disable(engine);
        spin_unlock_irq(&engine->i915->irq_lock);

        engine->breadcrumbs.irq_posted = false;
}

static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);
        struct drm_i915_private *i915 = engine->i915;

        assert_spin_locked(&b->lock);
        if (b->rpm_wakelock)
                return;

        /* Since we are waiting on a request, the GPU should be busy
         * and should have its own rpm reference. For completeness,
         * record an rpm reference for ourselves to cover the
         * interrupt we unmask.
         */
        intel_runtime_pm_get_noresume(i915);
        b->rpm_wakelock = true;

        /* No interrupts? Kick the waiter every jiffie! */
        if (intel_irqs_enabled(i915)) {
                if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
                        irq_enable(engine);
                b->irq_enabled = true;
        }

        if (!b->irq_enabled ||
            test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
                mod_timer(&b->fake_irq, jiffies + 1);
        } else {
                /* Ensure we never sleep indefinitely */
                GEM_BUG_ON(!time_after(b->timeout, jiffies));
                mod_timer(&b->hangcheck, b->timeout);
        }
}

static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);

        assert_spin_locked(&b->lock);
        if (!b->rpm_wakelock)
                return;

        if (b->irq_enabled) {
                irq_disable(engine);
                b->irq_enabled = false;
        }

        intel_runtime_pm_put(engine->i915);
        b->rpm_wakelock = false;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
        return container_of(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
                                              struct intel_wait *wait)
{
        assert_spin_locked(&b->lock);

        /* This request is completed, so remove it from the tree, mark it as
         * complete, and *then* wake up the associated task.
         */
        rb_erase(&wait->node, &b->waiters);
        RB_CLEAR_NODE(&wait->node);

        wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
                                    struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node **p, *parent, *completed;
        bool first;
        u32 seqno;

        /* Insert the request into the retirement ordered list
         * of waiters by walking the rbtree. If we are the oldest
         * seqno in the tree (the first to be retired), then
         * set ourselves as the bottom-half.
         *
         * As we descend the tree, prune completed branches. Since we
         * hold the spinlock, we know that the first_waiter must be
         * delayed, so we can reduce some of the sequential wake-up
         * latency by taking action ourselves and waking up the
         * completed tasks in parallel. Also, by removing stale elements
         * in the tree, we may be able to reduce the ping-pong between
         * the old bottom-half and ourselves as first-waiter.
         */
        first = true;
        parent = NULL;
        completed = NULL;
        seqno = intel_engine_get_seqno(engine);

        /* If the request completed before we managed to grab the spinlock,
         * return now before adding ourselves to the rbtree. We let the
         * current bottom-half handle any pending wakeups and instead
         * try and get out of the way quickly.
         */
        if (i915_seqno_passed(seqno, wait->seqno)) {
                RB_CLEAR_NODE(&wait->node);
                return first;
        }

        p = &b->waiters.rb_node;
        while (*p) {
                parent = *p;
                if (wait->seqno == to_wait(parent)->seqno) {
                        /* We have multiple waiters on the same seqno, select
                         * the highest priority task (that with the smallest
                         * task->prio) to serve as the bottom-half for this
                         * group.
                         */
                        if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
                                p = &parent->rb_right;
                                first = false;
                        } else {
                                p = &parent->rb_left;
                        }
                } else if (i915_seqno_passed(wait->seqno,
                                             to_wait(parent)->seqno)) {
                        p = &parent->rb_right;
                        if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
                                completed = parent;
                        else
                                first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&wait->node, parent, p);
        rb_insert_color(&wait->node, &b->waiters);
        GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));

        if (completed) {
                struct rb_node *next = rb_next(completed);

                GEM_BUG_ON(!next && !first);
                if (next && next != &wait->node) {
                        GEM_BUG_ON(first);
                        b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
                        rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        /* As there is a delay between reading the current
                         * seqno, processing the completed tasks and selecting
                         * the next waiter, we may have missed the interrupt
                         * and so need the next bottom-half to wake up.
                         *
                         * Also as we enable the IRQ, we may miss the
                         * interrupt for that seqno, so we have to wake up
                         * the next bottom-half in order to do a coherent check
                         * in case the seqno passed.
                         */
                        __intel_breadcrumbs_enable_irq(b);
                        if (READ_ONCE(b->irq_posted))
                                wake_up_process(to_wait(next)->tsk);
                }

                do {
                        struct intel_wait *crumb = to_wait(completed);
                        completed = rb_prev(completed);
                        __intel_breadcrumbs_finish(b, crumb);
                } while (completed);
        }

        if (first) {
                GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
                b->timeout = wait_timeout();
                b->first_wait = wait;
                rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
                /* After assigning ourselves as the new bottom-half, we must
                 * perform a cursory check to prevent a missed interrupt.
                 * Either we miss the interrupt whilst programming the hardware,
                 * or if there was a previous waiter (for a later seqno) they
                 * may be woken instead of us (due to the inherent race
                 * in the unlocked read of b->irq_seqno_bh in the irq handler)
                 * and so we miss the wake up.
                 */
                __intel_breadcrumbs_enable_irq(b);
        }
        GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
        GEM_BUG_ON(!b->first_wait);
        GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

        return first;
}

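/*
 * Add a waiter for @wait->seqno to the engine's rbtree of waiters.
 * Returns true if this waiter became the new bottom-half, i.e. the
 * task responsible for performing the coherent seqno check after the
 * next user interrupt.
 */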
bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        bool first;

        spin_lock(&b->lock);
        first = __intel_engine_add_wait(engine, wait);
        spin_unlock(&b->lock);

        return first;
}

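/*
 * When removing a waiter, we only spend time waking the remainder of
 * the chain if the next waiter is at least as high priority (smaller
 * tsk->prio) as the departing task; the signaler thread is assigned
 * INT_MIN so that it never incurs the chain-wakeup latency itself.
 */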
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
        return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
                                  struct task_struct *tsk)
{
        if (tsk == b->signaler)
                return INT_MIN;
        else
                return tsk->prio;
}

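/*
 * Remove @wait from the engine's rbtree. If it was the current
 * bottom-half, hand that duty (and the interrupt, if still needed)
 * over to the next oldest waiter, waking any already-completed
 * waiters along the way.
 */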
void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* Quick check to see if this waiter was already decoupled from
         * the tree by the bottom-half to avoid contention on the spinlock
         * by the herd.
         */
        if (RB_EMPTY_NODE(&wait->node))
                return;

        spin_lock(&b->lock);

        if (RB_EMPTY_NODE(&wait->node))
                goto out_unlock;

        if (b->first_wait == wait) {
                const int priority = wakeup_priority(b, wait->tsk);
                struct rb_node *next;

                GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);

                /* We are the current bottom-half. Find the next candidate,
                 * the first waiter in the queue on the remaining oldest
                 * request. As multiple seqnos may complete in the time it
                 * takes us to wake up and find the next waiter, we have to
                 * wake up that waiter for it to perform its own coherent
                 * completion check.
                 */
                next = rb_next(&wait->node);
                if (chain_wakeup(next, priority)) {
                        /* If the next waiter is already complete,
                         * wake it up and continue onto the next waiter. So
                         * if we have a small herd, they will wake up in
                         * parallel rather than sequentially, which should
                         * reduce the overall latency in waking all the
                         * completed clients.
                         *
                         * However, waking up a chain adds extra latency to
                         * the first_waiter. This is undesirable if that
                         * waiter is a high priority task.
                         */
                        u32 seqno = intel_engine_get_seqno(engine);

                        while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
                                struct rb_node *n = rb_next(next);

                                __intel_breadcrumbs_finish(b, to_wait(next));
                                next = n;
                                if (!chain_wakeup(next, priority))
                                        break;
                        }
                }

                if (next) {
                        /* In our haste, we may have completed the first waiter
                         * before we enabled the interrupt. Do so now as we
                         * have a second waiter for a future seqno. Afterwards,
                         * we have to wake up that waiter in case we missed
                         * the interrupt, or if we have to handle an
                         * exception rather than a seqno completion.
                         */
                        b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
                        rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        if (b->first_wait->seqno != wait->seqno)
                                __intel_breadcrumbs_enable_irq(b);
                        wake_up_process(b->first_wait->tsk);
                } else {
                        b->first_wait = NULL;
                        rcu_assign_pointer(b->irq_seqno_bh, NULL);
                        __intel_breadcrumbs_disable_irq(b);
                }
        } else {
                GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
        }

        GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
        rb_erase(&wait->node, &b->waiters);

out_unlock:
        GEM_BUG_ON(b->first_wait == wait);
        GEM_BUG_ON(rb_first(&b->waiters) !=
                   (b->first_wait ? &b->first_wait->node : NULL));
        GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
        spin_unlock(&b->lock);
}

static bool signal_complete(struct drm_i915_gem_request *request)
{
        if (!request)
                return false;

        /* If another process served as the bottom-half it may have already
         * signalled that this wait is already completed.
         */
        if (intel_wait_complete(&request->signaling.wait))
                return true;

        /* Carefully check if the request is complete, giving time for the
         * seqno to be visible or if the GPU hung.
         */
        if (__i915_request_irq_complete(request))
                return true;

        return false;
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
        return container_of(rb, struct drm_i915_gem_request, signaling.node);
}

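/* Run the signaler at the lowest realtime priority so that fence
 * signaling is not starved by ordinary CFS tasks.
 */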
static void signaler_set_rtpriority(void)
{
        struct sched_param param = { .sched_priority = 1 };

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

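/*
 * Main loop of the per-engine signaler kthread: wait for the oldest
 * signal to complete, propagate it to the fence, then pick the next
 * oldest signal from the rbtree.
 */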
static int intel_breadcrumbs_signaler(void *arg)
{
        struct intel_engine_cs *engine = arg;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct drm_i915_gem_request *request;

        /* Install ourselves with high priority to reduce signalling latency */
        signaler_set_rtpriority();

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                /* We are either woken up by the interrupt bottom-half,
                 * or by a client adding a new signaller. In both cases,
                 * the GPU seqno may have advanced beyond our oldest signal.
                 * If it has, propagate the signal, remove the waiter and
                 * check again with the next oldest signal. Otherwise we
                 * need to wait for a new interrupt from the GPU or for
                 * a new client.
                 */
                request = READ_ONCE(b->first_signal);
                if (signal_complete(request)) {
                        /* Wake up all other completed waiters and select the
                         * next bottom-half for the next user interrupt.
                         */
                        intel_engine_remove_wait(engine,
                                                 &request->signaling.wait);

                        local_bh_disable();
                        fence_signal(&request->fence);
                        local_bh_enable(); /* kick start the tasklets */

                        /* Find the next oldest signal. Note that as we have
                         * not been holding the lock, another client may
                         * have installed an even older signal than the one
                         * we just completed - so double check we are still
                         * the oldest before picking the next one.
                         */
                        spin_lock(&b->lock);
                        if (request == b->first_signal) {
                                struct rb_node *rb =
                                        rb_next(&request->signaling.node);
                                b->first_signal = rb ? to_signaler(rb) : NULL;
                        }
                        rb_erase(&request->signaling.node, &b->signals);
                        spin_unlock(&b->lock);

                        i915_gem_request_put(request);
                } else {
                        if (kthread_should_stop())
                                break;

                        schedule();
                }
        } while (1);
        __set_current_state(TASK_RUNNING);

        return 0;
}

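/*
 * Queue @request for signalling from the signaler kthread once the GPU
 * passes its seqno. Called with the request lock held via
 * fence_enable_sw_signaling().
 */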
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node *parent, **p;
        bool first, wakeup;

        /* locked by fence_enable_sw_signaling() */
        assert_spin_locked(&request->lock);

        request->signaling.wait.tsk = b->signaler;
        request->signaling.wait.seqno = request->fence.seqno;
        i915_gem_request_get(request);

        spin_lock(&b->lock);

        /* First add ourselves into the list of waiters, but register our
         * bottom-half as the signaller thread. As per usual, only the oldest
         * waiter (not just signaller) is tasked as the bottom-half waking
         * up all completed waiters after the user interrupt.
         *
         * If we are the oldest waiter, enable the irq (after which we
         * must double check that the seqno did not complete).
         */
        wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

        /* Now insert ourselves into the retirement ordered list of signals
         * on this engine. We track the oldest seqno as that will be the
         * first signal to complete.
         */
        parent = NULL;
        first = true;
        p = &b->signals.rb_node;
        while (*p) {
                parent = *p;
                if (i915_seqno_passed(request->fence.seqno,
                                      to_signaler(parent)->fence.seqno)) {
                        p = &parent->rb_right;
                        first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&request->signaling.node, parent, p);
        rb_insert_color(&request->signaling.node, &b->signals);
        if (first)
                smp_store_mb(b->first_signal, request);

        spin_unlock(&b->lock);

        if (wakeup)
                wake_up_process(b->signaler);
}

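/*
 * One-time per-engine setup: initialise the waiter lock and timers,
 * and spawn the signaler kthread. Returns 0 on success or a negative
 * errno if the kthread could not be created.
 */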
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct task_struct *tsk;

        spin_lock_init(&b->lock);
        setup_timer(&b->fake_irq,
                    intel_breadcrumbs_fake_irq,
                    (unsigned long)engine);
        setup_timer(&b->hangcheck,
                    intel_breadcrumbs_hangcheck,
                    (unsigned long)engine);

        /* Spawn a thread to provide a common bottom-half for all signals.
         * As this is an asynchronous interface we cannot steal the current
         * task for handling the bottom-half to the user interrupt, therefore
         * we create a thread to do the coherent seqno dance after the
         * interrupt and then signal the waitqueue (via the dma-buf/fence).
         */
        tsk = kthread_run(intel_breadcrumbs_signaler, engine,
                          "i915/signal:%d", engine->id);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        b->signaler = tsk;

        return 0;
}

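/* Stop both timers and clear the missed-interrupt flag for this engine. */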
static void cancel_fake_irq(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        del_timer_sync(&b->hangcheck);
        del_timer_sync(&b->fake_irq);
        clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

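/*
 * Restore the breadcrumbs to a known state after a GPU reset: rearm the
 * interrupt if anyone is still waiting, otherwise leave it masked.
 */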
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        cancel_fake_irq(engine);
        spin_lock(&b->lock);

        __intel_breadcrumbs_disable_irq(b);
        if (intel_engine_has_waiter(engine)) {
                b->timeout = wait_timeout();
                __intel_breadcrumbs_enable_irq(b);
                if (READ_ONCE(b->irq_posted))
                        wake_up_process(b->first_wait->tsk);
        } else {
                /* sanitize the IMR and unmask any auxiliary interrupts */
                irq_disable(engine);
        }

        spin_unlock(&b->lock);
}

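/* Teardown counterpart to intel_engine_init_breadcrumbs(). */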
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        if (!IS_ERR_OR_NULL(b->signaler))
                kthread_stop(b->signaler);

        cancel_fake_irq(engine);
}

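/*
 * Wake the bottom-half waiter on every engine; returns a mask of the
 * engines that had a waiter to kick.
 */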
unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        unsigned int mask = 0;

        /* To avoid the task_struct disappearing beneath us as we wake up
         * the process, we must first inspect the task_struct->state under the
         * RCU lock, i.e. as we call wake_up_process() we must be holding the
         * rcu_read_lock().
         */
        for_each_engine(engine, i915)
                if (unlikely(intel_engine_wakeup(engine)))
                        mask |= intel_engine_flag(engine);

        return mask;
}

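/*
 * Wake the signaler kthread on every engine that still has a pending
 * signal; returns a mask of the engines kicked.
 */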
unsigned int intel_kick_signalers(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        unsigned int mask = 0;

        for_each_engine(engine, i915) {
                if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
                        wake_up_process(engine->breadcrumbs.signaler);
                        mask |= intel_engine_flag(engine);
                }
        }

        return mask;
}