/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT     0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK       0x10

#define VCE_V3_0_FW_SIZE        (384 * 1024)
#define VCE_V3_0_STACK_SIZE     (64 * 1024)
#define VCE_V3_0_DATA_SIZE      ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_RPTR);
        else
                return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_WPTR);
        else
                return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, ring->wptr);
        else
                WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int idx, i, j, r;

        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {

                if (adev->vce.harvest_config & (1 << idx))
                        continue;

                if (idx == 0)
                        WREG32_P(mmGRBM_GFX_INDEX, 0,
                                ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
                else
                        WREG32_P(mmGRBM_GFX_INDEX,
                                GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
                                ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

                vce_v3_0_mc_resume(adev, idx);

                /* set BUSY flag */
                WREG32_P(mmVCE_STATUS, 1, ~1);

                WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
                        ~VCE_VCPU_CNTL__CLK_EN_MASK);

                WREG32_P(mmVCE_SOFT_RESET,
                         VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                         ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

                mdelay(100);

                WREG32_P(mmVCE_SOFT_RESET, 0,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

                for (i = 0; i < 10; ++i) {
                        uint32_t status;
                        for (j = 0; j < 100; ++j) {
                                status = RREG32(mmVCE_STATUS);
                                if (status & 2)
                                        break;
                                mdelay(10);
                        }
                        r = 0;
                        if (status & 2)
                                break;

                        DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
                        WREG32_P(mmVCE_SOFT_RESET,
                                VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                                ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                        mdelay(10);
                        WREG32_P(mmVCE_SOFT_RESET, 0,
                                ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                        mdelay(10);
                        r = -1;
                }

                /* clear BUSY flag */
                WREG32_P(mmVCE_STATUS, 0, ~1);

                if (r) {
                        DRM_ERROR("VCE not responding, giving up!!!\n");
                        mutex_unlock(&adev->grbm_idx_mutex);
                        return r;
                }
        }

        WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
        mutex_unlock(&adev->grbm_idx_mutex);

        ring = &adev->vce.ring[0];
        WREG32(mmVCE_RB_RPTR, ring->wptr);
        WREG32(mmVCE_RB_WPTR, ring->wptr);
        WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

        ring = &adev->vce.ring[1];
        WREG32(mmVCE_RB_RPTR2, ring->wptr);
        WREG32(mmVCE_RB_WPTR2, ring->wptr);
        WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

        return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT       27
#define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000

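/**
 * vce_v3_0_get_harvest_config - determine which VCE instances are harvested
 *
 * @adev: amdgpu_device pointer
 *
 * Reads the harvest fuses (Fiji and Stoney are always single pipe) and
 * returns a mask of the disabled VCE instances
 */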
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
        u32 tmp;
        unsigned ret;

        /* Fiji, Stoney are single pipe */
        if ((adev->asic_type == CHIP_FIJI) ||
            (adev->asic_type == CHIP_STONEY)) {
                ret = AMDGPU_VCE_HARVEST_VCE1;
                return ret;
        }

        /* Tonga and CZ are dual or single pipe */
        if (adev->flags & AMD_IS_APU)
                tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
                       VCE_HARVEST_FUSE_MACRO__MASK) >>
                        VCE_HARVEST_FUSE_MACRO__SHIFT;
        else
                tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
                       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
                        CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

        switch (tmp) {
        case 1:
                ret = AMDGPU_VCE_HARVEST_VCE0;
                break;
        case 2:
                ret = AMDGPU_VCE_HARVEST_VCE1;
                break;
        case 3:
                ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
                break;
        default:
                ret = 0;
        }

        return ret;
}

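/**
 * vce_v3_0_early_init - set up harvest config and function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Determines the harvest configuration and installs the ring and irq
 * callbacks; returns -ENOENT if both instances are harvested
 */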
static int vce_v3_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

        if ((adev->vce.harvest_config &
             (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
            (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
                return -ENOENT;

        vce_v3_0_set_ring_funcs(adev);
        vce_v3_0_set_irq_funcs(adev);

        return 0;
}

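/**
 * vce_v3_0_sw_init - software init
 *
 * @handle: amdgpu_device pointer
 *
 * Registers the VCE interrupt source, loads the firmware and
 * initializes both rings
 */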
static int vce_v3_0_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring;
        int r;

        /* VCE */
        r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
        if (r)
                return r;

        r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
                (VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
        if (r)
                return r;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        ring = &adev->vce.ring[0];
        sprintf(ring->name, "vce0");
        r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
                             &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
        if (r)
                return r;

        ring = &adev->vce.ring[1];
        sprintf(ring->name, "vce1");
        r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
                             &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
        if (r)
                return r;

        return r;
}

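/**
 * vce_v3_0_sw_fini - software teardown
 *
 * @handle: amdgpu_device pointer
 *
 * Suspends VCE and frees the software state
 */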
static int vce_v3_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        r = amdgpu_vce_sw_fini(adev);
        if (r)
                return r;

        return r;
}

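/**
 * vce_v3_0_hw_init - hardware init
 *
 * @handle: amdgpu_device pointer
 *
 * Starts the VCE block and tests both rings
 */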
static int vce_v3_0_hw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v3_0_start(adev);
        if (r)
                return r;

        ring = &adev->vce.ring[0];
        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                return r;
        }

        ring = &adev->vce.ring[1];
        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                return r;
        }

        DRM_INFO("VCE initialized successfully.\n");

        return 0;
}

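/**
 * vce_v3_0_hw_fini - hardware teardown
 *
 * @handle: amdgpu_device pointer
 *
 * Currently a no-op
 */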
static int vce_v3_0_hw_fini(void *handle)
{
        return 0;
}

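/**
 * vce_v3_0_suspend - suspend VCE
 *
 * @handle: amdgpu_device pointer
 *
 * Shuts down the hardware and suspends the VCE software state
 */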
static int vce_v3_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v3_0_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        return r;
}

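/**
 * vce_v3_0_resume - resume VCE
 *
 * @handle: amdgpu_device pointer
 *
 * Resumes the VCE software state and re-initializes the hardware
 */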
static int vce_v3_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        r = vce_v3_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

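/**
 * vce_v3_0_mc_resume - program memory controller and firmware layout
 *
 * @adev: amdgpu_device pointer
 * @idx: VCE instance index
 *
 * Programs the clock gating, LMI and VCPU cache registers with the
 * firmware, stack and data offsets for the given instance
 */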
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
        uint32_t offset, size;

        WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
        WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
        WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
        WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

        WREG32(mmVCE_LMI_CTRL, 0x00398000);
        WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
        WREG32(mmVCE_LMI_SWAP_CNTL, 0);
        WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
        WREG32(mmVCE_LMI_VM_CTRL, 0);

        WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
        offset = AMDGPU_VCE_FIRMWARE_OFFSET;
        size = VCE_V3_0_FW_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

        if (idx == 0) {
                offset += size;
                size = VCE_V3_0_STACK_SIZE;
                WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
                WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
                offset += size;
                size = VCE_V3_0_DATA_SIZE;
                WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
                WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
        } else {
                offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
                size = VCE_V3_0_STACK_SIZE;
                WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
                WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
                offset += size;
                size = VCE_V3_0_DATA_SIZE;
                WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
                WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
        }

        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

        WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
                 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}

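/**
 * vce_v3_0_is_idle - check VCE idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true when no unharvested VCE instance reports busy in
 * SRBM_STATUS2
 */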
static bool vce_v3_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 mask = 0;
        int idx;

        for (idx = 0; idx < 2; ++idx) {
                if (adev->vce.harvest_config & (1 << idx))
                        continue;

                if (idx == 0)
                        mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
                else
                        mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
        }

        return !(RREG32(mmSRBM_STATUS2) & mask);
}

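/**
 * vce_v3_0_wait_for_idle - wait for VCE to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Polls SRBM_STATUS2 until the unharvested instances are idle or the
 * timeout expires
 */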
static int vce_v3_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 mask = 0;
        int idx;

        for (idx = 0; idx < 2; ++idx) {
                if (adev->vce.harvest_config & (1 << idx))
                        continue;

                if (idx == 0)
                        mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
                else
                        mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
        }

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS2) & mask))
                        return 0;
        }
        return -ETIMEDOUT;
}

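/**
 * vce_v3_0_soft_reset - soft reset VCE
 *
 * @handle: amdgpu_device pointer
 *
 * Asserts soft reset for the unharvested instances and restarts the block
 */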
static int vce_v3_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 mask = 0;
        int idx;

        for (idx = 0; idx < 2; ++idx) {
                if (adev->vce.harvest_config & (1 << idx))
                        continue;

                if (idx == 0)
                        mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
                else
                        mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
        }
        WREG32_P(mmSRBM_SOFT_RESET, mask,
                 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
                   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
        mdelay(5);

        return vce_v3_0_start(adev);
}

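/**
 * vce_v3_0_print_status - dump VCE register state
 *
 * @handle: amdgpu_device pointer
 *
 * Prints the VCE 3.0 registers for debugging
 */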
static void vce_v3_0_print_status(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        dev_info(adev->dev, "VCE 3.0 registers\n");
        dev_info(adev->dev, "  VCE_STATUS=0x%08X\n",
                 RREG32(mmVCE_STATUS));
        dev_info(adev->dev, "  VCE_VCPU_CNTL=0x%08X\n",
                 RREG32(mmVCE_VCPU_CNTL));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE0=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_SIZE0));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE1=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_SIZE1));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
        dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE2=0x%08X\n",
                 RREG32(mmVCE_VCPU_CACHE_SIZE2));
        dev_info(adev->dev, "  VCE_SOFT_RESET=0x%08X\n",
                 RREG32(mmVCE_SOFT_RESET));
        dev_info(adev->dev, "  VCE_RB_BASE_LO2=0x%08X\n",
                 RREG32(mmVCE_RB_BASE_LO2));
        dev_info(adev->dev, "  VCE_RB_BASE_HI2=0x%08X\n",
                 RREG32(mmVCE_RB_BASE_HI2));
        dev_info(adev->dev, "  VCE_RB_SIZE2=0x%08X\n",
                 RREG32(mmVCE_RB_SIZE2));
        dev_info(adev->dev, "  VCE_RB_RPTR2=0x%08X\n",
                 RREG32(mmVCE_RB_RPTR2));
        dev_info(adev->dev, "  VCE_RB_WPTR2=0x%08X\n",
                 RREG32(mmVCE_RB_WPTR2));
        dev_info(adev->dev, "  VCE_RB_BASE_LO=0x%08X\n",
                 RREG32(mmVCE_RB_BASE_LO));
        dev_info(adev->dev, "  VCE_RB_BASE_HI=0x%08X\n",
                 RREG32(mmVCE_RB_BASE_HI));
        dev_info(adev->dev, "  VCE_RB_SIZE=0x%08X\n",
                 RREG32(mmVCE_RB_SIZE));
        dev_info(adev->dev, "  VCE_RB_RPTR=0x%08X\n",
                 RREG32(mmVCE_RB_RPTR));
        dev_info(adev->dev, "  VCE_RB_WPTR=0x%08X\n",
                 RREG32(mmVCE_RB_WPTR));
        dev_info(adev->dev, "  VCE_CLOCK_GATING_A=0x%08X\n",
                 RREG32(mmVCE_CLOCK_GATING_A));
        dev_info(adev->dev, "  VCE_CLOCK_GATING_B=0x%08X\n",
                 RREG32(mmVCE_CLOCK_GATING_B));
        dev_info(adev->dev, "  VCE_UENC_CLOCK_GATING=0x%08X\n",
                 RREG32(mmVCE_UENC_CLOCK_GATING));
        dev_info(adev->dev, "  VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
                 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
        dev_info(adev->dev, "  VCE_SYS_INT_EN=0x%08X\n",
                 RREG32(mmVCE_SYS_INT_EN));
        dev_info(adev->dev, "  VCE_LMI_CTRL2=0x%08X\n",
                 RREG32(mmVCE_LMI_CTRL2));
        dev_info(adev->dev, "  VCE_LMI_CTRL=0x%08X\n",
                 RREG32(mmVCE_LMI_CTRL));
        dev_info(adev->dev, "  VCE_LMI_VM_CTRL=0x%08X\n",
                 RREG32(mmVCE_LMI_VM_CTRL));
        dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL=0x%08X\n",
                 RREG32(mmVCE_LMI_SWAP_CNTL));
        dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL1=0x%08X\n",
                 RREG32(mmVCE_LMI_SWAP_CNTL1));
        dev_info(adev->dev, "  VCE_LMI_CACHE_CTRL=0x%08X\n",
                 RREG32(mmVCE_LMI_CACHE_CTRL));
}

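/**
 * vce_v3_0_set_interrupt_state - enable/disable the VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Enables or disables the VCE system interrupt trap
 */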
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        uint32_t val = 0;

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

        WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
        return 0;
}

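/**
 * vce_v3_0_process_interrupt - handle a VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Processes the fences of the ring that signalled the interrupt
 */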
static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCE\n");
        switch (entry->src_data) {
        case 0:
                amdgpu_fence_process(&adev->vce.ring[0]);
                break;
        case 1:
                amdgpu_fence_process(&adev->vce.ring[1]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data);
                break;
        }

        return 0;
}

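/**
 * vce_v3_0_set_clockgating_state - set VCE clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: requested clockgating state
 *
 * Currently a no-op
 */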
static int vce_v3_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        return 0;
}

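/**
 * vce_v3_0_set_powergating_state - set VCE powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: requested powergating state
 *
 * Re-initializes or idles the block; the actual power gating is done
 * by the SMC via the dpm code
 */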
static int vce_v3_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the VCE block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_PG_STATE_GATE)
                /* XXX do we need a vce_v3_0_stop()? */
                return 0;
        else
                return vce_v3_0_start(adev);
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
        .early_init = vce_v3_0_early_init,
        .late_init = NULL,
        .sw_init = vce_v3_0_sw_init,
        .sw_fini = vce_v3_0_sw_fini,
        .hw_init = vce_v3_0_hw_init,
        .hw_fini = vce_v3_0_hw_fini,
        .suspend = vce_v3_0_suspend,
        .resume = vce_v3_0_resume,
        .is_idle = vce_v3_0_is_idle,
        .wait_for_idle = vce_v3_0_wait_for_idle,
        .soft_reset = vce_v3_0_soft_reset,
        .print_status = vce_v3_0_print_status,
        .set_clockgating_state = vce_v3_0_set_clockgating_state,
        .set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
        .get_rptr = vce_v3_0_ring_get_rptr,
        .get_wptr = vce_v3_0_ring_get_wptr,
        .set_wptr = vce_v3_0_ring_set_wptr,
        .parse_cs = amdgpu_vce_ring_parse_cs,
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .emit_semaphore = amdgpu_vce_ring_emit_semaphore,
        .test_ring = amdgpu_vce_ring_test_ring,
        .test_ib = amdgpu_vce_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
        adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
        .set = vce_v3_0_set_interrupt_state,
        .process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->vce.irq.num_types = 1;
        adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}