/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

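/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes the sequence number to the driver's fence location: the
 * writeback buffer when it is enabled, otherwise the scratch register.
 */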
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		*drv->cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

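/**
 * radeon_fence_read - read the last signaled fence value
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads the fence value from the writeback buffer when it is enabled,
 * otherwise from the scratch register. Returns the 32-bit sequence
 * number last signaled by the GPU.
 */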
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		seq = le32_to_cpu(*drv->cpu_addr);
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

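/**
 * radeon_fence_emit - allocate and emit a fence on the requested ring
 *
 * @rdev: radeon device pointer
 * @fence: resulting radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Allocates a fence, assigns it the next sequence number for the ring
 * and emits the fence packet on the ring. The caller is expected to
 * hold the ring emission mutex (see the comment in the body).
 * Returns 0 on success or -ENOMEM if the fence could not be allocated.
 */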
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
	return 0;
}

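/**
 * radeon_fence_process - process newly signaled fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads the 32-bit fence value written back by the GPU and extends it to
 * 64 bits using the upper bits of the last known sequence number, handling
 * wraparound of the hardware counter. For example (illustrative values
 * only): if last_seq is 0x00000001fffffff0 and the hardware reports
 * 0x00000010, the extended sequence becomes 0x0000000200000010 because the
 * 32-bit counter wrapped. Wakes up waiters on the fence queue when new
 * fences have signaled.
 */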
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that other
	 * process needs to update last_seq between the atomic read and
	 * xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there needs to be
	 * a continuous stream of newly signaled fences, i.e. radeon_fence_read
	 * needs to return a different value each time for both the currently
	 * polling process and the other process that xchg's last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process sets as last_seq must be higher than
	 * the seq value we just read, which means the current process
	 * needs to be interrupted after radeon_fence_read and before
	 * the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail out after 10 iterations, accepting the fact that we might
	 * have temporarily set last_seq not to the true last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq += 0x100000000LL;
		}

		if (seq == last_seq) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}

static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	kfree(fence);
}

static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

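/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Returns true if the fence has signaled, caching the result in
 * fence->seq as RADEON_FENCE_SIGNALED_SEQ, or false otherwise.
 */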
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

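/**
 * radeon_fence_wait_seq - wait for a specific sequence number on a ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: take the ring mutex before handling a detected lockup
 *
 * Waits until the requested sequence number has signaled, periodically
 * re-arming a timeout so GPU lockups can be detected. Returns 0 once the
 * sequence has signaled, -EBUSY if the ring is not ready, -EDEADLK if a
 * lockup was detected, or a negative error code if the wait was
 * interrupted.
 */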
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}

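/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Waits for the requested fence to signal. @intr selects whether the wait
 * can be interrupted by a signal. Returns 0 when the fence has signaled,
 * -EINVAL for an invalid fence, or a negative error code on failure.
 */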
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}

	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

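/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: array of sequence numbers, one per ring (0 = skip that ring)
 * @intr: use interruptible sleep
 *
 * Waits until any of the requested sequence numbers has signaled, using
 * the lowest ring with a non-zero target for lockup detection. Returns 0
 * once a sequence has signaled, -ENOENT if there is nothing to wait for,
 * -EDEADLK on a detected lockup, or a negative error code on failure.
 */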
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for ? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

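/**
 * radeon_fence_wait_any - wait for the first fence of an array to signal
 *
 * @rdev: radeon device pointer
 * @fences: array of RADEON_NUM_RINGS fence pointers (NULL entries are skipped)
 * @intr: use interruptible sleep
 *
 * Returns 0 as soon as any of the fences has signaled (or was already
 * signaled), or a negative error code on failure.
 */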
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		seq[i] = fences[i]->seq;
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}

/* caller must hold ring lock */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/* caller must hold ring lock */
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];

	while (1) {
		int r;
		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
		if (r == -EDEADLK) {
			mutex_unlock(&rdev->ring_lock);
			r = radeon_gpu_reset(rdev);
			mutex_lock(&rdev->ring_lock);
			if (!r)
				continue;
		}
		if (r) {
			dev_err(rdev->dev, "error waiting for ring to become"
				" idle (%d)\n", r);
		}
		return;
	}
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

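/**
 * radeon_fence_count_emitted - count emitted but not yet signaled fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Returns the number of fences emitted on the ring that have not signaled
 * yet, clamped to 0x10000000 so a wrapped counter cannot produce a bogus
 * huge value.
 */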
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

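/**
 * radeon_fence_need_sync - check if a sync to another ring is needed
 *
 * @fence: fence emitted on the source ring (may be NULL)
 * @dst_ring: ring index we want to submit to
 *
 * Returns true if @dst_ring has not yet synchronized past the sequence
 * number of @fence; false if the fence is NULL, belongs to @dst_ring, or
 * is already covered by a previous sync to that ring.
 */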
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

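/**
 * radeon_fence_note_sync - record that @dst_ring synchronized to @fence
 *
 * @fence: fence that was synced to (may be NULL)
 * @dst_ring: ring index that synchronized
 *
 * Updates the destination ring's per-ring sync_seq values to the maximum
 * of its own and the source ring's, so later radeon_fence_need_sync()
 * calls can skip redundant synchronization.
 */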
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

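/**
 * radeon_fence_driver_start_ring - make the fence driver ready for a ring
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Sets up the fence location, either in the writeback buffer (when fence
 * events are used) or backed by a scratch register, and writes back the
 * last known sequence number. Returns 0 on success or a negative error
 * code if no scratch register could be allocated.
 */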
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

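/**
 * radeon_fence_driver_init - init the fence driver for all rings
 *
 * @rdev: radeon device pointer
 *
 * Initializes the per-ring fence driver state and the shared fence wait
 * queue, and registers the fence debugfs file. Always returns 0; actual
 * ring setup happens later in radeon_fence_driver_start_ring().
 */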
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

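/**
 * radeon_fence_driver_fini - tear down the fence driver for all rings
 *
 * @rdev: radeon device pointer
 *
 * Waits for all outstanding fences on every initialized ring, wakes any
 * remaining waiters and frees the scratch registers.
 */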
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty_locked(rdev, ring);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}