/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

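/**
 * radeon_fence_write - write a fence value
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes the fence sequence number to the write-back slot when write-back
 * is enabled (or when no scratch register was allocated), otherwise to the
 * ring's scratch register.
 */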
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
                *drv->cpu_addr = cpu_to_le32(seq);
        } else {
                WREG32(drv->scratch_reg, seq);
        }
}

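/**
 * radeon_fence_read - read back a fence value
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads the last signaled fence value from the write-back slot when
 * write-back is enabled (or when no scratch register was allocated),
 * otherwise from the ring's scratch register. Returns the 32 bit value.
 */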
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
        u32 seq = 0;

        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
                seq = le32_to_cpu(*drv->cpu_addr);
        } else {
                seq = RREG32(drv->scratch_reg);
        }
        return seq;
}

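/**
 * radeon_fence_emit - allocate and emit a fence on the requested ring
 * @rdev: radeon_device pointer
 * @fence: filled with the newly allocated fence object
 * @ring: ring index the fence is associated with
 *
 * Allocates a fence, assigns it the next sequence number for the ring and
 * emits it to the ring. The caller holds the ring emission mutex.
 * Returns 0 on success or -ENOMEM if the allocation fails.
 */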
int radeon_fence_emit(struct radeon_device *rdev,
                      struct radeon_fence **fence,
                      int ring)
{
        /* we are protected by the ring emission mutex */
        *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
        if ((*fence) == NULL) {
                return -ENOMEM;
        }
        kref_init(&((*fence)->kref));
        (*fence)->rdev = rdev;
        (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
        (*fence)->ring = ring;
        radeon_fence_ring_emit(rdev, ring, *fence);
        trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
        return 0;
}

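/**
 * radeon_fence_process - process the fence sequence written by the hw
 * @rdev: radeon_device pointer
 * @ring: ring index to process
 *
 * Reads the current fence value from the hardware, extends it to 64 bits
 * relative to last_seq and, if it moved forward, updates last_seq and
 * last_activity and wakes up everybody waiting on fence_queue.
 */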
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
        uint64_t seq, last_seq;
        unsigned count_loop = 0;
        bool wake = false;

        /* Note there is a scenario here for an infinite loop but it's
         * very unlikely to happen. For it to happen, the current polling
         * process needs to be interrupted by another process, and that
         * other process needs to update last_seq between the atomic read
         * and the xchg of the current process.
         *
         * Moreover, for this to turn into an infinite loop, new fences
         * must be signaled continuously, i.e. radeon_fence_read needs
         * to return a different value each time for both the currently
         * polling process and the other process that updates last_seq
         * between our atomic read and xchg. And the value the other
         * process sets as last_seq must be higher than the seq value we
         * just read, which means the current process needs to be
         * interrupted after radeon_fence_read and before the atomic xchg.
         *
         * To be even safer, we count the number of times we loop and
         * bail out after 10 iterations, accepting the fact that we might
         * have temporarily set last_seq not to the true last signaled
         * seq but to an older one.
         */
        last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
        do {
                seq = radeon_fence_read(rdev, ring);
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq += 0x100000000LL;
                }

                if (seq == last_seq) {
                        break;
                }
                /* If we loop over, we don't want to return without
                 * checking if a fence is signaled, as it means that the
                 * seq we just read is different from the previous one.
                 */
                wake = true;
                last_seq = seq;
                if ((count_loop++) > 10) {
                        /* We looped too many times; bail out, accepting
                         * the fact that we might have set an older fence
                         * seq than the current real last seq signaled
                         * by the hw.
                         */
                        break;
                }
        } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

        if (wake) {
                rdev->fence_drv[ring].last_activity = jiffies;
                wake_up_all(&rdev->fence_queue);
        }
}

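/**
 * radeon_fence_destroy - kref callback that frees a fence
 * @kref: the fence's embedded kref
 *
 * Called by kref_put() when the last reference to a fence is dropped.
 */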
static void radeon_fence_destroy(struct kref *kref)
{
        struct radeon_fence *fence;

        fence = container_of(kref, struct radeon_fence, kref);
        kfree(fence);
}

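/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 * @rdev: radeon device pointer
 * @seq: sequence number to check
 * @ring: ring index the fence is associated with
 *
 * Returns true once last_seq has reached @seq, polling the hardware via
 * radeon_fence_process() at least once before giving up.
 */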
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
                                      u64 seq, unsigned ring)
{
        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
                return true;
        }
        /* poll new last sequence at least once */
        radeon_fence_process(rdev, ring);
        if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
                return true;
        }
        return false;
}

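/**
 * radeon_fence_signaled - check if a fence has signaled
 * @fence: radeon fence object
 *
 * Returns true if the fence has signaled, caching the result by setting
 * fence->seq to RADEON_FENCE_SIGNALED_SEQ. A NULL fence is treated as
 * signaled.
 */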
bool radeon_fence_signaled(struct radeon_fence *fence)
{
        if (!fence) {
                return true;
        }
        if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
                return true;
        }
        if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
                fence->seq = RADEON_FENCE_SIGNALED_SEQ;
                return true;
        }
        return false;
}

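/**
 * radeon_fence_wait_seq - wait for a specific sequence number on a ring
 * @rdev: radeon device pointer
 * @target_seq: sequence number to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: take the ring lock while checking for a lockup
 *
 * Waits until @target_seq has signaled, re-arming the wait as long as the
 * fence activity shows progress. Returns 0 once the sequence has signaled,
 * -EBUSY if the ring is not ready, -EDEADLK if a GPU lockup was detected,
 * or -ERESTARTSYS if the interruptible wait was interrupted.
 */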
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
                                 unsigned ring, bool intr, bool lock_ring)
{
        unsigned long timeout, last_activity;
        uint64_t seq;
        unsigned i;
        bool signaled;
        int r;

        while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
                if (!rdev->ring[ring].ready) {
                        return -EBUSY;
                }

                timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
                if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
                        /* the normal case, timeout is somewhere before last_activity */
                        timeout = rdev->fence_drv[ring].last_activity - timeout;
                } else {
                        /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
                         * either way we will just wait for the minimum amount and then check for a lockup
                         */
                        timeout = 1;
                }
                seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
                /* Save current last activity value, used to check for GPU lockups */
                last_activity = rdev->fence_drv[ring].last_activity;

                trace_radeon_fence_wait_begin(rdev->ddev, seq);
                radeon_irq_kms_sw_irq_get(rdev, ring);
                if (intr) {
                        r = wait_event_interruptible_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
                                timeout);
                } else {
                        r = wait_event_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
                                timeout);
                }
                radeon_irq_kms_sw_irq_put(rdev, ring);
                if (unlikely(r < 0)) {
                        return r;
                }
                trace_radeon_fence_wait_end(rdev->ddev, seq);

                if (unlikely(!signaled)) {
                        /* we were interrupted for some reason and the fence
                         * isn't signaled yet, resume waiting */
                        if (r) {
                                continue;
                        }

                        /* check if sequence value has changed since last_activity */
                        if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
                                continue;
                        }

                        if (lock_ring) {
                                mutex_lock(&rdev->ring_lock);
                        }

                        /* test if somebody else has already decided that this is a lockup */
                        if (last_activity != rdev->fence_drv[ring].last_activity) {
                                if (lock_ring) {
                                        mutex_unlock(&rdev->ring_lock);
                                }
                                continue;
                        }

                        if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                                /* good news, we believe it's a lockup */
                                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
                                         target_seq, seq);

                                /* change last activity so nobody else thinks there is a lockup */
                                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                                        rdev->fence_drv[i].last_activity = jiffies;
                                }

                                /* mark the ring as not ready any more */
                                rdev->ring[ring].ready = false;
                                if (lock_ring) {
                                        mutex_unlock(&rdev->ring_lock);
                                }
                                return -EDEADLK;
                        }

                        if (lock_ring) {
                                mutex_unlock(&rdev->ring_lock);
                        }
                }
        }
        return 0;
}

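/**
 * radeon_fence_wait - wait for a fence to signal
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Waits for the fence to signal and then marks it as signaled.
 * Returns 0 on success, -EINVAL if @fence is NULL, or the error
 * returned by radeon_fence_wait_seq().
 */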
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
        int r;

        if (fence == NULL) {
                WARN(1, "Querying an invalid fence : %p !\n", fence);
                return -EINVAL;
        }

        r = radeon_fence_wait_seq(fence->rdev, fence->seq,
                                  fence->ring, intr, true);
        if (r) {
                return r;
        }
        fence->seq = RADEON_FENCE_SIGNALED_SEQ;
        return 0;
}

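/**
 * radeon_fence_any_seq_signaled - check if any requested sequence has signaled
 * @rdev: radeon device pointer
 * @seq: array of RADEON_NUM_RINGS sequence numbers, 0 means skip that ring
 *
 * Returns true as soon as one of the non-zero sequence numbers has signaled
 * on its ring.
 */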
bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
        unsigned i;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
                        return true;
                }
        }
        return false;
}

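/**
 * radeon_fence_wait_any_seq - wait for the first of several sequences
 * @rdev: radeon device pointer
 * @target_seq: array of RADEON_NUM_RINGS sequence numbers, 0 means skip
 * @intr: use interruptible sleep
 *
 * Waits until at least one of the requested sequence numbers has signaled.
 * The lowest ring with a non-zero sequence is used for lockup detection.
 * Returns 0 on success (or if there was nothing to wait for), -EDEADLK on
 * a GPU lockup, or -ERESTARTSYS if the interruptible wait was interrupted.
 */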
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
                                     u64 *target_seq, bool intr)
{
        unsigned long timeout, last_activity, tmp;
        unsigned i, ring = RADEON_NUM_RINGS;
        bool signaled;
        int r;

        for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
                if (!target_seq[i]) {
                        continue;
                }

                /* use the most recent one as indicator */
                if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
                        last_activity = rdev->fence_drv[i].last_activity;
                }

                /* For lockup detection just pick the lowest ring we are
                 * actively waiting for
                 */
                if (i < ring) {
                        ring = i;
                }
        }

        /* nothing to wait for? */
        if (ring == RADEON_NUM_RINGS) {
                return 0;
        }

        while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
                timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
                if (time_after(last_activity, timeout)) {
                        /* the normal case, timeout is somewhere before last_activity */
                        timeout = last_activity - timeout;
                } else {
                        /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
                         * either way we will just wait for the minimum amount and then check for a lockup
                         */
                        timeout = 1;
                }

                trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                        if (target_seq[i]) {
                                radeon_irq_kms_sw_irq_get(rdev, i);
                        }
                }
                if (intr) {
                        r = wait_event_interruptible_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
                                timeout);
                } else {
                        r = wait_event_timeout(rdev->fence_queue,
                                (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
                                timeout);
                }
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                        if (target_seq[i]) {
                                radeon_irq_kms_sw_irq_put(rdev, i);
                        }
                }
                if (unlikely(r < 0)) {
                        return r;
                }
                trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

                if (unlikely(!signaled)) {
                        /* we were interrupted for some reason and the fence
                         * isn't signaled yet, resume waiting */
                        if (r) {
                                continue;
                        }

                        mutex_lock(&rdev->ring_lock);
                        for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
                                if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
                                        tmp = rdev->fence_drv[i].last_activity;
                                }
                        }
                        /* test if somebody else has already decided that this is a lockup */
                        if (last_activity != tmp) {
                                last_activity = tmp;
                                mutex_unlock(&rdev->ring_lock);
                                continue;
                        }

                        if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                                /* good news, we believe it's a lockup */
                                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
                                         target_seq[ring]);

                                /* change last activity so nobody else thinks there is a lockup */
                                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                                        rdev->fence_drv[i].last_activity = jiffies;
                                }

                                /* mark the ring as not ready any more */
                                rdev->ring[ring].ready = false;
                                mutex_unlock(&rdev->ring_lock);
                                return -EDEADLK;
                        }
                        mutex_unlock(&rdev->ring_lock);
                }
        }
        return 0;
}

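/**
 * radeon_fence_wait_any - wait for the first of several fences
 * @rdev: radeon device pointer
 * @fences: array of RADEON_NUM_RINGS fence pointers, NULL entries are skipped
 * @intr: use interruptible sleep
 *
 * Returns 0 as soon as any of the fences has signaled (or was already
 * signaled), otherwise propagates the error from radeon_fence_wait_any_seq().
 */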
int radeon_fence_wait_any(struct radeon_device *rdev,
                          struct radeon_fence **fences,
                          bool intr)
{
        uint64_t seq[RADEON_NUM_RINGS];
        unsigned i;
        int r;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                seq[i] = 0;

                if (!fences[i]) {
                        continue;
                }

                if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
                        /* something was already signaled */
                        return 0;
                }

                seq[i] = fences[i]->seq;
        }

        r = radeon_fence_wait_any_seq(rdev, seq, intr);
        if (r) {
                return r;
        }
        return 0;
}

/* caller must hold ring lock */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
        uint64_t seq;

        seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
        if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
                /* nothing to wait for, last_seq is
                   already the last emitted fence */
                return -ENOENT;
        }
        return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/* caller must hold ring lock */
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
        uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];

        while (1) {
                int r;
                r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
                if (r == -EDEADLK) {
                        mutex_unlock(&rdev->ring_lock);
                        r = radeon_gpu_reset(rdev);
                        mutex_lock(&rdev->ring_lock);
                        if (!r)
                                continue;
                }
                if (r) {
                        dev_err(rdev->dev, "error waiting for ring to become"
                                " idle (%d)\n", r);
                }
                return;
        }
}

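/**
 * radeon_fence_ref - take a reference on a fence
 * @fence: radeon fence object
 *
 * Increments the fence's reference count and returns the fence.
 */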
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
        kref_get(&fence->kref);
        return fence;
}

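/**
 * radeon_fence_unref - drop a reference on a fence
 * @fence: pointer to the fence pointer, cleared to NULL
 *
 * Clears *@fence and drops the reference; the fence is freed once the
 * last reference is gone.
 */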
void radeon_fence_unref(struct radeon_fence **fence)
{
        struct radeon_fence *tmp = *fence;

        *fence = NULL;
        if (tmp) {
                kref_put(&tmp->kref, radeon_fence_destroy);
        }
}

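/**
 * radeon_fence_count_emitted - number of emitted but not yet signaled fences
 * @rdev: radeon device pointer
 * @ring: ring index to check
 *
 * Returns the difference between the last emitted and the last signaled
 * sequence number, clamped to 0x10000000 to guard against wrap around.
 */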
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        radeon_fence_process(rdev, ring);
        emitted = rdev->fence_drv[ring].sync_seq[ring]
                - atomic64_read(&rdev->fence_drv[ring].last_seq);
        /* to avoid 32-bit wrap around */
        if (emitted > 0x10000000) {
                emitted = 0x10000000;
        }
        return (unsigned)emitted;
}

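/**
 * radeon_fence_need_sync - check whether @dst_ring needs to wait for a fence
 * @fence: fence emitted on another ring
 * @dst_ring: ring index that wants to consume the fence's results
 *
 * Returns true if @dst_ring has not yet synced past the fence's sequence
 * number; false for a NULL fence, the fence's own ring, or when an earlier
 * sync already covers it.
 */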
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
        struct radeon_fence_driver *fdrv;

        if (!fence) {
                return false;
        }

        if (fence->ring == dst_ring) {
                return false;
        }

        /* we are protected by the ring mutex */
        fdrv = &fence->rdev->fence_drv[dst_ring];
        if (fence->seq <= fdrv->sync_seq[fence->ring]) {
                return false;
        }

        return true;
}

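/**
 * radeon_fence_note_sync - record that @dst_ring synced to a fence
 * @fence: fence that was waited on
 * @dst_ring: ring index that performed the sync
 *
 * Raises @dst_ring's sync_seq values to the maximum of its own and the
 * source ring's, so later radeon_fence_need_sync() calls can skip
 * redundant waits. The caller holds the ring mutex.
 */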
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
        struct radeon_fence_driver *dst, *src;
        unsigned i;

        if (!fence) {
                return;
        }

        if (fence->ring == dst_ring) {
                return;
        }

        /* we are protected by the ring mutex */
        src = &fence->rdev->fence_drv[fence->ring];
        dst = &fence->rdev->fence_drv[dst_ring];
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (i == dst_ring) {
                        continue;
                }
                dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
        }
}

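/**
 * radeon_fence_driver_start_ring - make the fence driver ready for a ring
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Selects where the fence value for @ring lives (a write-back event slot
 * when events are used, otherwise a freshly allocated scratch register),
 * rewrites the last signaled value there and marks the ring's fence driver
 * as initialized. Returns 0 on success or a negative error if no scratch
 * register could be allocated.
 */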
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
        uint64_t index;
        int r;

        radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
        if (rdev->wb.use_event) {
                rdev->fence_drv[ring].scratch_reg = 0;
                index = R600_WB_EVENT_OFFSET + ring * 4;
        } else {
                r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
                if (r) {
                        dev_err(rdev->dev, "fence failed to get scratch register\n");
                        return r;
                }
                index = RADEON_WB_SCRATCH_OFFSET +
                        rdev->fence_drv[ring].scratch_reg -
                        rdev->scratch.reg_base;
        }
        rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
        rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
        radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
        rdev->fence_drv[ring].initialized = true;
        dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
                 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
        return 0;
}

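/**
 * radeon_fence_driver_init_ring - init the fence driver data for one ring
 * @rdev: radeon device pointer
 * @ring: ring index to initialize
 *
 * Resets the per-ring fence bookkeeping (scratch register, addresses,
 * sequence numbers, last activity) to a clean, not yet initialized state.
 */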
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
        int i;

        rdev->fence_drv[ring].scratch_reg = -1;
        rdev->fence_drv[ring].cpu_addr = NULL;
        rdev->fence_drv[ring].gpu_addr = 0;
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                rdev->fence_drv[ring].sync_seq[i] = 0;
        atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
        rdev->fence_drv[ring].last_activity = jiffies;
        rdev->fence_drv[ring].initialized = false;
}

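/**
 * radeon_fence_driver_init - init the fence driver for all rings
 * @rdev: radeon device pointer
 *
 * Initializes the fence wait queue and the per-ring fence driver data, and
 * registers the fence debugfs file. Individual rings are enabled later via
 * radeon_fence_driver_start_ring(). Always returns 0.
 */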
int radeon_fence_driver_init(struct radeon_device *rdev)
{
        int ring;

        init_waitqueue_head(&rdev->fence_queue);
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                radeon_fence_driver_init_ring(rdev, ring);
        }
        if (radeon_debugfs_fence_init(rdev)) {
                dev_err(rdev->dev, "fence debugfs file creation failed\n");
        }
        return 0;
}

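/**
 * radeon_fence_driver_fini - tear down the fence driver for all rings
 * @rdev: radeon device pointer
 *
 * Waits for all outstanding fences on initialized rings, wakes any
 * remaining waiters, frees the scratch registers and marks the rings as
 * uninitialized. Takes the ring lock.
 */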
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
        int ring;

        mutex_lock(&rdev->ring_lock);
        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                if (!rdev->fence_drv[ring].initialized)
                        continue;
                radeon_fence_wait_empty_locked(rdev, ring);
                wake_up_all(&rdev->fence_queue);
                radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
                rdev->fence_drv[ring].initialized = false;
        }
        mutex_unlock(&rdev->ring_lock);
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        int i, j;

        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (!rdev->fence_drv[i].initialized)
                        continue;

                seq_printf(m, "--- ring %d ---\n", i);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
                seq_printf(m, "Last emitted        0x%016llx\n",
                           rdev->fence_drv[i].sync_seq[i]);

                for (j = 0; j < RADEON_NUM_RINGS; ++j) {
                        if (i != j && rdev->fence_drv[j].initialized)
                                seq_printf(m, "Last sync to ring %d 0x%016llx\n",
                                           j, rdev->fence_drv[i].sync_seq[j]);
                }
        }
        return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
        {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
        return 0;
#endif
}