/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"

#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define VCE_V2_0_FW_SIZE        (256 * 1024)
#define VCE_V2_0_STACK_SIZE     (64 * 1024)
#define VCE_V2_0_DATA_SIZE      (23552 * AMDGPU_MAX_VCE_HANDLES)

static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v2_0_wait_for_idle(void *handle);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_RPTR);
        else
                return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_WPTR);
        else
                return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, ring->wptr);
        else
                WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int i, j, r;

        vce_v2_0_mc_resume(adev);

        /* set BUSY flag */
        WREG32_P(mmVCE_STATUS, 1, ~1);

        ring = &adev->vce.ring[0];
        WREG32(mmVCE_RB_RPTR, ring->wptr);
        WREG32(mmVCE_RB_WPTR, ring->wptr);
        WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

        ring = &adev->vce.ring[1];
        WREG32(mmVCE_RB_RPTR2, ring->wptr);
        WREG32(mmVCE_RB_WPTR2, ring->wptr);
        WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

        WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
                 ~VCE_VCPU_CNTL__CLK_EN_MASK);

        WREG32_P(mmVCE_SOFT_RESET,
                 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

        mdelay(100);

        WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

        for (i = 0; i < 10; ++i) {
                uint32_t status;
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmVCE_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
                WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                         ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        /* clear BUSY flag */
        WREG32_P(mmVCE_STATUS, 0, ~1);

        if (r) {
                DRM_ERROR("VCE not responding, giving up!!!\n");
                return r;
        }

        return 0;
}

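/**
 * vce_v2_0_early_init - set up ring and irq callbacks
 *
 * @handle: amdgpu_device pointer
 *
 * Set the ring and irq function pointers for the two VCE rings.
 */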
static int vce_v2_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        vce_v2_0_set_ring_funcs(adev);
        vce_v2_0_set_irq_funcs(adev);

        return 0;
}

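/**
 * vce_v2_0_sw_init - software init
 *
 * @handle: amdgpu_device pointer
 *
 * Register the VCE interrupt source, load the firmware and
 * initialize the two VCE rings.
 */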
static int vce_v2_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* VCE */
        r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
        if (r)
                return r;

        r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
                VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
        if (r)
                return r;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        ring = &adev->vce.ring[0];
        sprintf(ring->name, "vce0");
        r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
                             &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
        if (r)
                return r;

        ring = &adev->vce.ring[1];
        sprintf(ring->name, "vce1");
        r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
                             &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
        if (r)
                return r;

        return r;
}

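/**
 * vce_v2_0_sw_fini - software teardown
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend the VCE block and free the resources allocated in sw_init.
 */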
static int vce_v2_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        r = amdgpu_vce_sw_fini(adev);
        if (r)
                return r;

        return r;
}

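/**
 * vce_v2_0_hw_init - start the hardware and test the rings
 *
 * @handle: amdgpu_device pointer
 *
 * Start the VCE block and run a ring test on both rings.
 */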
static int vce_v2_0_hw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v2_0_start(adev);
        if (r)
                /* a failure here means the VCPU is not in a running state,
                 * so skip the ring test instead of failing driver init
                 */
                return 0;

        ring = &adev->vce.ring[0];
        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                return r;
        }

        ring = &adev->vce.ring[1];
        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                return r;
        }

        DRM_INFO("VCE initialized successfully.\n");

        return 0;
}

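/**
 * vce_v2_0_hw_fini - stop the hardware
 *
 * @handle: amdgpu_device pointer
 *
 * Currently a no-op.
 */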
static int vce_v2_0_hw_fini(void *handle)
{
        return 0;
}

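/**
 * vce_v2_0_suspend - suspend VCE
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the hardware via hw_fini and suspend the common VCE code.
 */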
static int vce_v2_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v2_0_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        return r;
}

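/**
 * vce_v2_0_resume - resume VCE
 *
 * @handle: amdgpu_device pointer
 *
 * Resume the common VCE code and restart the hardware via hw_init.
 */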
static int vce_v2_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        r = vce_v2_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

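/**
 * vce_v2_0_set_sw_cg - software-controlled clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: enable or disable gating
 *
 * Toggle the VCE clock gating bits directly from the driver.
 */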
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
        u32 tmp;

        if (gated) {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp &= ~0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
        } else {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe7;
                tmp &= ~0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0x1fe000;
                tmp &= ~0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp |= 0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
        }
}

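/**
 * vce_v2_0_set_dyn_cg - dynamic clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: enable or disable gating
 *
 * Switch dynamic clock gating. To gate, the VCPU clock is stopped
 * first; in both cases the ECPU is held in reset while the gating
 * bits are reprogrammed.
 */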
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
        u32 orig, tmp;

        if (gated) {
                if (vce_v2_0_wait_for_idle(adev)) {
                        DRM_INFO("VCE is busy, can't set clock gating\n");
                        return;
                }
                WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
                WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                         ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(100);
                WREG32(mmVCE_STATUS, 0);
        } else {
                WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
                         ~VCE_VCPU_CNTL__CLK_EN_MASK);
                WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                         ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(100);
        }

        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp &= ~0x00060006;
        if (gated) {
                tmp |= 0xe10000;
        } else {
                tmp |= 0xe1;
                tmp &= ~0xe10000;
        }
        WREG32(mmVCE_CLOCK_GATING_B, tmp);

        orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0x1fe000;
        tmp &= ~0xff000000;
        if (tmp != orig)
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
        tmp &= ~0x3fc;
        if (tmp != orig)
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

        if (gated)
                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
        WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
}

static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
        bool sw_cg = false;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, true);
                else
                        vce_v2_0_set_dyn_cg(adev, true);
        } else {
                vce_v2_0_disable_cg(adev);

                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, false);
                else
                        vce_v2_0_set_dyn_cg(adev, false);
        }
}

static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
        u32 tmp;

        tmp = RREG32(mmVCE_CLOCK_GATING_A);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        tmp |= 0x40000;
        WREG32(mmVCE_CLOCK_GATING_A, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0x10;
        tmp &= ~0x100000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

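/**
 * vce_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Program the LMI and the VCPU cache offsets/sizes for the
 * firmware, stack and data segments, then initialize clock gating.
 */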
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t addr = adev->vce.gpu_addr;
        uint32_t size;

        WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
        WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
        WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
        WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

        WREG32(mmVCE_LMI_CTRL, 0x00398000);
        WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
        WREG32(mmVCE_LMI_SWAP_CNTL, 0);
        WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
        WREG32(mmVCE_LMI_VM_CTRL, 0);

        addr += AMDGPU_VCE_FIRMWARE_OFFSET;
        size = VCE_V2_0_FW_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

        addr += size;
        size = VCE_V2_0_STACK_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

        addr += size;
        size = VCE_V2_0_DATA_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

        WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
                 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);

        vce_v2_0_init_cg(adev);
}

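/**
 * vce_v2_0_is_idle - check VCE idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Return true if the VCE busy bit in SRBM_STATUS2 is clear.
 */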
static bool vce_v2_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

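/**
 * vce_v2_0_wait_for_idle - wait for VCE to go idle
 *
 * @handle: amdgpu_device pointer
 *
 * Poll SRBM_STATUS2 until VCE reports idle; returns -ETIMEDOUT
 * if the timeout expires first.
 */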
static int vce_v2_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

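/**
 * vce_v2_0_soft_reset - soft reset VCE
 *
 * @handle: amdgpu_device pointer
 *
 * Assert the VCE soft reset in the SRBM, then restart the block.
 */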
static int vce_v2_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
                 ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
        mdelay(5);

        return vce_v2_0_start(adev);
}

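/**
 * vce_v2_0_set_interrupt_state - enable/disable the VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Toggle the VCE system interrupt trap enable bit.
 */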
static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        uint32_t val = 0;

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

        WREG32_P(mmVCE_SYS_INT_EN, val,
                 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
        return 0;
}

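/**
 * vce_v2_0_process_interrupt - process a VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Process the fences for the ring that raised the interrupt.
 */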
static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCE\n");
        switch (entry->src_data) {
        case 0:
                amdgpu_fence_process(&adev->vce.ring[0]);
                break;
        case 1:
                amdgpu_fence_process(&adev->vce.ring[1]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data);
                break;
        }

        return 0;
}

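/**
 * vce_v2_0_set_clockgating_state - set VCE clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: requested clock gating state
 *
 * Enable or disable VCE medium grain clock gating (MGCG).
 */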
static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        bool gate = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_CG_STATE_GATE)
                gate = true;

        vce_v2_0_enable_mgcg(adev, gate);

        return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the VCE block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
                return 0;

        if (state == AMD_PG_STATE_GATE)
                /* XXX do we need a vce_v2_0_stop()? */
                return 0;
        else
                return vce_v2_0_start(adev);
}

const struct amd_ip_funcs vce_v2_0_ip_funcs = {
        .name = "vce_v2_0",
        .early_init = vce_v2_0_early_init,
        .late_init = NULL,
        .sw_init = vce_v2_0_sw_init,
        .sw_fini = vce_v2_0_sw_fini,
        .hw_init = vce_v2_0_hw_init,
        .hw_fini = vce_v2_0_hw_fini,
        .suspend = vce_v2_0_suspend,
        .resume = vce_v2_0_resume,
        .is_idle = vce_v2_0_is_idle,
        .wait_for_idle = vce_v2_0_wait_for_idle,
        .soft_reset = vce_v2_0_soft_reset,
        .set_clockgating_state = vce_v2_0_set_clockgating_state,
        .set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .get_rptr = vce_v2_0_ring_get_rptr,
        .get_wptr = vce_v2_0_ring_get_wptr,
        .set_wptr = vce_v2_0_ring_set_wptr,
        .parse_cs = amdgpu_vce_ring_parse_cs,
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .test_ring = amdgpu_vce_ring_test_ring,
        .test_ib = amdgpu_vce_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vce_ring_begin_use,
        .end_use = amdgpu_vce_ring_end_use,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
        adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
        .set = vce_v2_0_set_interrupt_state,
        .process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->vce.irq.num_types = 1;
        adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}