/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
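
/*
 * BO layout used by this code: a single firmware image followed by a
 * private stack and data segment for each of the two VCE instances;
 * vce_v3_0_sw_init() below sizes the allocation accordingly.
 */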
#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}
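
/*
 * vce_v3_0_override_vce_clock_gating - toggle the CGTT override
 *
 * Assert or release the clock-gating override bit in VCE_RB_ARB_CTRL so
 * that the software-programmed gating settings, rather than the hardware
 * defaults, take effect; the register is only written when its value
 * actually changes.
 */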
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	u32 tmp, data;

	tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
	if (override)
		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
	else
		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;

	if (tmp != data)
		WREG32(mmVCE_RB_ARB_CTRL, data);
}
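
/*
 * vce_v3_0_set_vce_sw_clock_gating - software clock gating sequence
 *
 * With gated == false every VCE clock is forced on; with gated == true
 * the clocks are forced off or left in dynamic mode. The whole sequence
 * runs with the CGTT override asserted and releases it at the end.
 */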
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 tmp, data;
	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	if (!gated) {
		/* Force CLOCK ON for VCE_CLOCK_GATING_B,
		 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
		 * VREG can be FORCE ON or set to Dynamic, but can't be OFF
		 */
		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		if (tmp != data)
			WREG32(mmVCE_CLOCK_GATING_B, data);

		/* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
		 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
		 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING, data);

		/* set VCE_UENC_CLOCK_GATING_2 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		/* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		if (tmp != data)
			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
				VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
				VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
				0x8;
		if (tmp != data)
			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		/* Force CLOCK OFF for VCE_CLOCK_GATING_B,
		 * {*, *_FORCE_OFF} = {*, 1}
		 * set VREG to Dynamic, as it can't be OFF
		 */
		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		if (tmp != data)
			WREG32(mmVCE_CLOCK_GATING_B, data);
		/* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
		 * Force CLOCK OFF takes precedence over Force CLOCK ON setting.
		 * {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
		 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		/* Set VCE_UENC_CLOCK_GATING_2 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
		/* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0xffc00000;
		if (tmp != data)
			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
		/* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
				VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
				VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
				0x8);
		if (tmp != data)
			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	vce_v3_0_override_vce_clock_gating(adev, false);
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, i, j, r;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {

		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		vce_v3_0_mc_resume(adev, idx);

		/* set BUSY flag */
		WREG32_P(mmVCE_STATUS, 1, ~1);
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
				~VCE_VCPU_CNTL__CLK_EN_MASK);

		WREG32_P(mmVCE_SOFT_RESET,
			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		mdelay(100);

		WREG32_P(mmVCE_SOFT_RESET, 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		for (i = 0; i < 10; ++i) {
			uint32_t status;
			for (j = 0; j < 100; ++j) {
				status = RREG32(mmVCE_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
			WREG32_P(mmVCE_SOFT_RESET,
				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(mmVCE_SOFT_RESET, 0,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		/* clear BUSY flag */
		WREG32_P(mmVCE_STATUS, 0, ~1);

		/* Set Clock-Gating off */
		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
			vce_v3_0_set_vce_sw_clock_gating(adev, false);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000
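
/*
 * vce_v3_0_get_harvest_config - query harvested VCE instances
 *
 * Single-pipe parts always report VCE1 as harvested; dual-pipe parts
 * read the harvest fuses (the VCE fuse macro on APUs, CC_HARVEST_FUSES
 * on dGPUs) to see which instances were fused off.
 */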
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY) ||
	    (adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11))
		return AMDGPU_VCE_HARVEST_VCE1;

	/* Tonga and CZ are dual or single pipe */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		return 0;
	}
}
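
/*
 * IP block callbacks: early init detects harvesting and hooks up the
 * ring and interrupt function tables before any hardware is touched.
 */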
static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
		(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
		(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	return 0;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return 0;
}

static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	adev->vce.ring[0].ready = false;
	adev->vce.ring[1].ready = false;

	for (i = 0; i < 2; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	return 0;
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return 0;
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return 0;
}
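
/*
 * vce_v3_0_mc_resume - memory controller programming
 *
 * Program the clock-gating defaults, the LMI, and the VCPU cache
 * offset/size windows (firmware, stack, data) for the given instance.
 */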
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
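
/* Idle when no non-harvested VCE instance reports busy in SRBM_STATUS2. */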
static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}
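
/* Assert the SRBM soft-reset bits for the present instances, then restart the block. */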
static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;

	WREG32_P(mmSRBM_SOFT_RESET, mask,
		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
	mdelay(5);

	return vce_v3_0_start(adev);
}

static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_P(mmVCE_SYS_INT_STATUS,
		VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
		~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);

	switch (entry->src_data) {
	case 0:
	case 1:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		if (i == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
					~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
					GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
					~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
		return 0;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.soft_reset = vce_v3_0_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}