/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define VCE_V2_0_FW_SIZE        (256 * 1024)
#define VCE_V2_0_STACK_SIZE     (64 * 1024)
#define VCE_V2_0_DATA_SIZE      (23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK   0x02

static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v2_0_wait_for_idle(void *handle);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_RPTR);
        else
                return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_WPTR);
        else
                return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, ring->wptr);
        else
                WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

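/**
 * vce_v2_0_lmi_clean - wait for the VCE memory interface to go clean
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_LMI_STATUS until any of the expected clean/idle bits
 * (mask 0x337f) are set. Returns 0 on success or -ETIMEDOUT otherwise.
 */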
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_LMI_STATUS);

                        if (status & 0x337f)
                                return 0;
                        mdelay(10);
                }
        }

        return -ETIMEDOUT;
}

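/**
 * vce_v2_0_firmware_loaded - wait for the VCE firmware to report in
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_STATUS for the VCPU "firmware loaded" flag. If the flag does
 * not show up, the ECPU is soft reset and polling is retried. Returns 0
 * on success or -ETIMEDOUT if the firmware never reports in.
 */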
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_STATUS);

                        if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
                                return 0;
                        mdelay(10);
                }

                DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
                WREG32_P(mmVCE_SOFT_RESET,
                        VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmVCE_SOFT_RESET, 0,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
        }

        return -ETIMEDOUT;
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int r;

        vce_v2_0_mc_resume(adev);

        /* set BUSY flag */
        WREG32_P(mmVCE_STATUS, 1, ~1);

        ring = &adev->vce.ring[0];
        WREG32(mmVCE_RB_RPTR, ring->wptr);
        WREG32(mmVCE_RB_WPTR, ring->wptr);
        WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

        ring = &adev->vce.ring[1];
        WREG32(mmVCE_RB_RPTR2, ring->wptr);
        WREG32(mmVCE_RB_WPTR2, ring->wptr);
        WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

        WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
        mdelay(100);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);

        r = vce_v2_0_firmware_loaded(adev);

        /* clear BUSY flag */
        WREG32_P(mmVCE_STATUS, 0, ~1);

        if (r) {
                DRM_ERROR("VCE not responding, giving up!!!\n");
                return r;
        }

        return 0;
}

static int vce_v2_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->vce.num_rings = 2;

        vce_v2_0_set_ring_funcs(adev);
        vce_v2_0_set_irq_funcs(adev);

        return 0;
}

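/**
 * vce_v2_0_sw_init - software init
 *
 * @handle: amdgpu_device pointer
 *
 * Registers the VCE interrupt source, initializes the common VCE
 * support code and sets up the two VCE rings.
 */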
static int vce_v2_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* VCE */
        r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
        if (r)
                return r;

        r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
                VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
        if (r)
                return r;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        for (i = 0; i < adev->vce.num_rings; i++) {
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
                r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
                                     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
                if (r)
                        return r;
        }

        return r;
}

static int vce_v2_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        r = amdgpu_vce_sw_fini(adev);
        if (r)
                return r;

        return r;
}

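/**
 * vce_v2_0_hw_init - hardware init
 *
 * @handle: amdgpu_device pointer
 *
 * Starts the VCE block and runs a ring test on each ring. If the block
 * fails to start, the ring tests are skipped and driver init continues.
 */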
static int vce_v2_0_hw_init(void *handle)
{
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v2_0_start(adev);
        /* this error means the VCPU is not in a running state, so just
         * skip the ring test and don't stop driver initialization */
        if (r)
                return 0;

        for (i = 0; i < adev->vce.num_rings; i++)
                adev->vce.ring[i].ready = false;

        for (i = 0; i < adev->vce.num_rings; i++) {
                r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
                if (r)
                        return r;
                else
                        adev->vce.ring[i].ready = true;
        }

        DRM_INFO("VCE initialized successfully.\n");

        return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
        return 0;
}

static int vce_v2_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v2_0_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        return r;
}

static int vce_v2_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        r = vce_v2_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

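/**
 * vce_v2_0_set_sw_cg - software-controlled clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: gate the clocks if true, ungate them otherwise
 *
 * Programs the VCE_CLOCK_GATING_B and VCE_UENC_* clock gating registers
 * to force the VCE clocks off (gated) or on (ungated).
 */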
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
        u32 tmp;

        if (gated) {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp &= ~0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
        } else {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe7;
                tmp &= ~0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0x1fe000;
                tmp &= ~0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp |= 0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
        }
}

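/**
 * vce_v2_0_set_dyn_cg - dynamic (hardware-controlled) clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: gate the clocks if true, ungate them otherwise
 *
 * Waits for VCE to go idle, stops the VCPU and programs the clock
 * gating registers for dynamic operation. When ungating, the VCPU is
 * restarted and the function waits for the firmware to come back up.
 */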
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
        if (vce_v2_0_wait_for_idle(adev)) {
                DRM_INFO("VCE is busy, can't set clock gating");
                return;
        }

        WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);

        if (vce_v2_0_lmi_clean(adev)) {
                DRM_INFO("LMI is busy, can't set clock gating");
                return;
        }

        WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
        WREG32_P(mmVCE_SOFT_RESET,
                 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
        WREG32(mmVCE_STATUS, 0);

        if (gated)
                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
        /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
        if (gated) {
                /* Force CLOCK OFF, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
                WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
        } else {
                /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
                WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
        }

        /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0} */
        WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);

        /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

        WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
        if (!gated) {
                WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
                mdelay(100);
                WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

                vce_v2_0_firmware_loaded(adev);
                WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
        }
}

static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
        bool sw_cg = false;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, true);
                else
                        vce_v2_0_set_dyn_cg(adev, true);
        } else {
                vce_v2_0_disable_cg(adev);

                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, false);
                else
                        vce_v2_0_set_dyn_cg(adev, false);
        }
}

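/**
 * vce_v2_0_init_cg - initial clock gating setup
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the default values of the VCE clock gating registers.
 */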
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
        u32 tmp;

        tmp = RREG32(mmVCE_CLOCK_GATING_A);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        tmp |= 0x40000;
        WREG32(mmVCE_CLOCK_GATING_A, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0x10;
        tmp &= ~0x100000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

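/**
 * vce_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the VCPU cache offsets and sizes for the firmware, stack and
 * data regions and sets up the LMI and clock gating registers.
 */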
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t addr = adev->vce.gpu_addr;
        uint32_t size;

        WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
        WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
        WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
        WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

        WREG32(mmVCE_LMI_CTRL, 0x00398000);
        WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
        WREG32(mmVCE_LMI_SWAP_CNTL, 0);
        WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
        WREG32(mmVCE_LMI_VM_CTRL, 0);

        addr += AMDGPU_VCE_FIRMWARE_OFFSET;
        size = VCE_V2_0_FW_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

        addr += size;
        size = VCE_V2_0_STACK_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

        addr += size;
        size = VCE_V2_0_DATA_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
        WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);

        vce_v2_0_init_cg(adev);
}

static bool vce_v2_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v2_0_wait_for_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        unsigned i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (vce_v2_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
}

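/**
 * vce_v2_0_soft_reset - soft reset VCE
 *
 * @handle: amdgpu_device pointer
 *
 * Asserts the VCE soft reset in the SRBM and then restarts the block.
 */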
static int vce_v2_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
        mdelay(5);

        return vce_v2_0_start(adev);
}

static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        uint32_t val = 0;

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

        WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
        return 0;
}

static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCE\n");
        switch (entry->src_data) {
        case 0:
        case 1:
                amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data);
                break;
        }

        return 0;
}

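/**
 * vce_v2_0_set_bypass_mode - toggle ECLK bypass
 *
 * @adev: amdgpu_device pointer
 * @enable: enable the ECLK DFS bypass if true, disable it otherwise
 *
 * Sets or clears the BYPASSECLK bit in the SMC GCK_DFS_BYPASS_CNTL
 * register.
 */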
static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
        u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

        if (enable)
                tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
        else
                tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

        WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool gate = (state == AMD_CG_STATE_GATE);

        vce_v2_0_set_bypass_mode(adev, gate);
        vce_v2_0_enable_mgcg(adev, gate);

        return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the VCE block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
                return 0;

        if (state == AMD_PG_STATE_GATE)
                /* XXX do we need a vce_v2_0_stop()? */
                return 0;
        else
                return vce_v2_0_start(adev);
}

const struct amd_ip_funcs vce_v2_0_ip_funcs = {
        .name = "vce_v2_0",
        .early_init = vce_v2_0_early_init,
        .late_init = NULL,
        .sw_init = vce_v2_0_sw_init,
        .sw_fini = vce_v2_0_sw_fini,
        .hw_init = vce_v2_0_hw_init,
        .hw_fini = vce_v2_0_hw_fini,
        .suspend = vce_v2_0_suspend,
        .resume = vce_v2_0_resume,
        .is_idle = vce_v2_0_is_idle,
        .wait_for_idle = vce_v2_0_wait_for_idle,
        .soft_reset = vce_v2_0_soft_reset,
        .set_clockgating_state = vce_v2_0_set_clockgating_state,
        .set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .get_rptr = vce_v2_0_ring_get_rptr,
        .get_wptr = vce_v2_0_ring_get_wptr,
        .set_wptr = vce_v2_0_ring_set_wptr,
        .parse_cs = amdgpu_vce_ring_parse_cs,
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .test_ring = amdgpu_vce_ring_test_ring,
        .test_ib = amdgpu_vce_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vce_ring_begin_use,
        .end_use = amdgpu_vce_ring_end_use,
        .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
        .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vce.num_rings; i++)
                adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
        .set = vce_v2_0_set_interrupt_state,
        .process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->vce.irq.num_types = 1;
        adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}