/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "si/si_reg.h"
#include "si/sid.h"
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};
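
/* DIG/AFMT register offsets, consumed by dce_v6_0_afmt_init() below to
 * locate each encoder's AFMT block; the last entry is a dword register
 * delta that presumably covers the DIG block sitting outside the
 * per-CRTC register range.
 */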
static const uint32_t dig_offsets[] = {
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};
static const struct {
	uint32_t	reg;
	uint32_t	vblank;
	uint32_t	vline;
	uint32_t	hpd;
} interrupt_status_offsets[6] = { {
	.reg = DISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = DISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
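
/* HPD pad interrupt control registers, indexed by amdgpu_hpd_id */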
static const uint32_t hpd_int_control_offsets[6] = {
	DC_HPD1_INT_CONTROL,
	DC_HPD2_INT_CONTROL,
	DC_HPD3_INT_CONTROL,
	DC_HPD4_INT_CONTROL,
	DC_HPD5_INT_CONTROL,
	DC_HPD6_INT_CONTROL,
};
static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	DRM_INFO("dce_v6_0_audio_endpt_rreg: not implemented\n");
	return 0;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	DRM_INFO("dce_v6_0_audio_endpt_wreg: not implemented\n");
}
static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
		return true;
	else
		return false;
}
static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}
/**
 * dce_v6_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 100;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v6_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v6_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v6_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}
static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}
/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* flip at hsync for async, default is vsync */
	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* post the write */
	RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}
/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case AMDGPU_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}
/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	switch (hpd) {
	case AMDGPU_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case AMDGPU_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}
/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			break;
		case AMDGPU_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			break;
		case AMDGPU_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			break;
		case AMDGPU_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			break;
		case AMDGPU_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			break;
		case AMDGPU_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			break;
		default:
			break;
		}

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS; this avoids
			 * breaking the aux dp channel on imac and helps (but does
			 * not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * It also avoids interrupt storms during dpms.
			 */
			u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;

			switch (amdgpu_connector->hpd.hpd) {
			case AMDGPU_HPD_1:
				dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
				break;
			case AMDGPU_HPD_2:
				dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
				break;
			case AMDGPU_HPD_3:
				dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
				break;
			case AMDGPU_HPD_4:
				dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
				break;
			case AMDGPU_HPD_5:
				dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
				break;
			case AMDGPU_HPD_6:
				dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
				break;
			default:
				continue;
			}

			dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
			dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
			WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
			continue;
		}

		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}
/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			break;
		case AMDGPU_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			break;
		case AMDGPU_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			break;
		case AMDGPU_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			break;
		case AMDGPU_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			break;
		case AMDGPU_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			break;
		default:
			break;
		}

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return SI_DC_GPIO_HPD_A;
}

static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
{
	DRM_INFO("dce_v6_0_is_display_hung: not implemented\n");

	return true;
}
static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
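
/* Note: this duplicates dce_v6_0_vblank_get_counter(); it is kept for the
 * MC stop/resume helpers below, which appear to have been carried over
 * from the radeon evergreen code.
 */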
static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
				    struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count;
	int i, j;

	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(VGA_RENDER_CONTROL, 0);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);

			if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
				dce_v6_0_vblank_wait(adev, i);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = evergreen_get_vblank_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (evergreen_get_vblank_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}

			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}
static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
				      struct amdgpu_mode_mc_save *save)
{
	u32 tmp;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
	}

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);

	/* unlock regs and wait for update */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
			if ((tmp & 0x7) != 3) {
				tmp &= ~0x7;
				tmp |= 0x3;
				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (tmp & 0x1) {
				tmp &= ~0x1;
				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
		}
	}

	/* Unlock vga access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(R_000300_VGA_RENDER_CONTROL,
		       RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
				FMT_SPATIAL_DITHER_EN);
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
				FMT_RGB_RANDOM_ENABLE |
				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}
struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};
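
/* The dce6_wm_params helpers below feed dce_v6_0_program_watermarks():
 * raw and display-visible DRAM bandwidth, data return and DMIF request
 * bandwidth, and the latency watermark derived from them.  All results
 * are in MBytes/s except the latency watermark, which is in ns.
 */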
/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
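
/* Worked example: two dram channels with wm->yclk = 1000000 (1 GHz per
 * data pin) give (1000000 / 1000) * (2 * 4) * 0.7 = 5600 MBytes/s of
 * raw bandwidth from the formula above.
 */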
/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}
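
/* Worked example: wm->sclk = 800000 (an 800 MHz engine clock) returns
 * 32 bytes/clk * (800000 / 1000) * 0.8 = 20480 MBytes/s.
 */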
/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}
/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (SI).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
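
/* The value returned above is the raw memory/pipe latency plus, when the
 * line buffer cannot be refilled within one active period, the extra
 * time needed to finish filling it.
 */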
/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}
/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}
/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
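
/* Latency is "hidden" when the line buffer holds enough already-fetched
 * lines to keep scanout fed for longer than the worst-case data return
 * latency computed above.
 */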
/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (SI).
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		if (adev->pm.dpm_enabled) {
			/* watermark for low clocks */
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
}
/* watermark setup */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 *  0 - half lb
	 *  2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       DC_LB_MEMORY_CONFIG(tmp));

	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (SI).
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!adev->mode_info.mode_config_initialized)
		return;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
		mode0 = &adev->mode_info.crtcs[i]->base.mode;
		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}
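
/* The CRTCs above are walked in pairs (i, i+1) because each pair shares
 * one line buffer; see the line buffer comment in
 * dce_v6_0_line_buffer_adjust().
 */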
static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 tmp, offset;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}
static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v6_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}
static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}
static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	DRM_INFO("dce_v6_0_audio_write_latency_fields: not implemented\n");
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	DRM_INFO("dce_v6_0_audio_write_speaker_allocation: not implemented\n");
}

static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	DRM_INFO("dce_v6_0_audio_write_sad_regs: not implemented\n");
}

static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	DRM_INFO("dce_v6_0_audio_enable: not implemented\n");
}
static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};
static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{

}
static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	DRM_INFO("dce_v6_0_afmt_update_ACR: not implemented\n");
}

/*
 * build a HDMI Video Info Frame
 */
static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	DRM_INFO("dce_v6_0_afmt_update_avi_infoframe: not implemented\n");
}

static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	DRM_INFO("dce_v6_0_audio_set_dto: not implemented\n");
}

/*
 * update the info frames with the data from the current display mode
 */
static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	DRM_INFO("dce_v6_0_afmt_setmode: not implemented\n");
}
static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}
static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->mode_info.num_dig; i++)
		adev->mode_info.afmt[i] = NULL;

	/* DCE6 has audio blocks tied to DIG encoders */
	for (i = 0; i < adev->mode_info.num_dig; i++) {
		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
		if (adev->mode_info.afmt[i]) {
			adev->mode_info.afmt[i]->offset = dig_offsets[i];
			adev->mode_info.afmt[i]->id = i;
		} else {
			for (j = 0; j < i; j++) {
				kfree(adev->mode_info.afmt[j]);
				adev->mode_info.afmt[j] = NULL;
			}
			DRM_ERROR("Out of memory allocating afmt table\n");
			return -ENOMEM;
		}
	}

	return 0;
}
1484 for (i = 0; i < adev->mode_info.num_dig; i++) {
1485 kfree(adev->mode_info.afmt[i]);
1486 adev->mode_info.afmt[i] = NULL;
static const u32 vga_control_regs[6] =
{
	AVIVO_D1VGA_CONTROL,
	AVIVO_D2VGA_CONTROL,
	EVERGREEN_D3VGA_CONTROL,
	EVERGREEN_D4VGA_CONTROL,
	EVERGREEN_D5VGA_CONTROL,
	EVERGREEN_D6VGA_CONTROL,
};
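
/* One VGA control register per display controller; D1/D2 keep their
 * AVIVO-era register names while D3-D6 use the evergreen ones.
 */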
static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 vga_control;

	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
}
static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
}
static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *abo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels, pipe_config;
	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
	u32 viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		target_fb = fb;
	} else {
		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		target_fb = crtc->primary->fb;
	}

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = amdgpu_fb->obj;
	abo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(abo, false);
	if (unlikely(r != 0))
		return r;

	if (atomic)
		fb_location = amdgpu_bo_gpu_offset(abo);
	else {
		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(abo);
			return -EINVAL;
		}
	}

	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
	amdgpu_bo_unreserve(abo);

	switch (target_fb->pixel_format) {
	case DRM_FORMAT_C8:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
#ifdef __BIG_ENDIAN
		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->pixel_format));
		return -EINVAL;
	}
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
		fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
		fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
	}

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
	fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
	dce_v6_0_vga_enable(crtc, false);

	/* Make sure surface address is updated at vertical blank rather than
	 * horizontal blank
	 */
	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
	WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
		 (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
		 ~EVERGREEN_LUT_10BIT_BYPASS_EN);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
	WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
	WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
	dce_v6_0_grph_enable(crtc, true);

	WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
	       target_fb->height);
	x &= ~3;
	y &= ~1;
	WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;

	WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);

	/* set pageflip to happen anywhere in vblank interval */
	WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

	if (!atomic && fb && fb != crtc->primary->fb) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, false);
		if (unlikely(r != 0))
			return r;
		amdgpu_bo_unpin(abo);
		amdgpu_bo_unreserve(abo);
	}

	/* Bytes per pixel may have changed */
	dce_v6_0_bandwidth_update(adev);

	return 0;
}
static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
				    struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
		       EVERGREEN_INTERLEAVE_EN);
	else
		WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}
static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
	WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
	       NI_GRPH_PRESCALE_BYPASS);
	WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
	       NI_OVL_PRESCALE_BYPASS);
	WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));

	WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       (amdgpu_crtc->lut_r[i] << 20) |
		       (amdgpu_crtc->lut_g[i] << 10) |
		       (amdgpu_crtc->lut_b[i] << 0));
	}

	WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
	WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
	WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
	WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
	       (NI_OUTPUT_CSC_GRPH_MODE(0) |
		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
}
static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		return dig->linkb ? 1 : 0;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		return dig->linkb ? 3 : 2;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		return dig->linkb ? 5 : 4;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		return dig->linkb ? 7 : 6;
	default:
		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
		return 0;
	}
}
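
/* Each UNIPHY block drives two links, so the DIG index works out to
 * 2 * uniphy_block + (link B ? 1 : 0).
 */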
/**
 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
 * monitors a dedicated PPLL must be used.  If a particular board has
 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
 * as there is no need to program the PLL itself.  If we are not able to
 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
 * avoid messing up an existing monitor.
 */
static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 pll_in_use;
	int pll;

	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
		if (adev->clock.dp_extclk)
			/* skip PPLL programming if using ext clock */
			return ATOM_PPLL_INVALID;
		else
			return ATOM_PPLL0;
	} else {
		/* use the same PPLL for all monitors with the same clock */
		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	}

	/* PPLL1, and PPLL2 */
	pll_in_use = amdgpu_pll_get_use_mask(crtc);
	if (!(pll_in_use & (1 << ATOM_PPLL2)))
		return ATOM_PPLL2;
	if (!(pll_in_use & (1 << ATOM_PPLL1)))
		return ATOM_PPLL1;
	DRM_ERROR("unable to allocate a PPLL\n");
	return ATOM_PPLL_INVALID;
}
static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint32_t cur_lock;

	cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
	if (lock)
		cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
	else
		cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
	WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}
static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
		   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
		   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
}
static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(amdgpu_crtc->cursor_addr));
	WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(amdgpu_crtc->cursor_addr));

	WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
		   EVERGREEN_CURSOR_EN |
		   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
		   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
}
static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
				       int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;
	int w = amdgpu_crtc->cursor_width;

	/* avivo cursor are offset into the total surface */
	x += crtc->x;
	y += crtc->y;
	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
	WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

	amdgpu_crtc->cursor_x = x;
	amdgpu_crtc->cursor_y = y;
	return 0;
}
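
/* The cursor position registers are unsigned, so negative coordinates
 * are clamped to zero above and the visible offset is carried by
 * CUR_HOT_SPOT instead.
 */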
static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
				     int x, int y)
{
	int ret;

	dce_v6_0_lock_cursor(crtc, true);
	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
	dce_v6_0_lock_cursor(crtc, false);

	return ret;
}
static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
				     struct drm_file *file_priv,
				     uint32_t handle,
				     uint32_t width,
				     uint32_t height,
				     int32_t hot_x,
				     int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v6_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	amdgpu_crtc->cursor_width = width;
	amdgpu_crtc->cursor_height = height;

	dce_v6_0_lock_cursor(crtc, true);

	if (hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v6_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	dce_v6_0_show_cursor(crtc);
	dce_v6_0_lock_cursor(crtc, false);

unpin:
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, false);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
}
static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (amdgpu_crtc->cursor_bo) {
		dce_v6_0_lock_cursor(crtc, true);
		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
					    amdgpu_crtc->cursor_y);
		dce_v6_0_show_cursor(crtc);
		dce_v6_0_lock_cursor(crtc, false);
	}
}

static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				   u16 *blue, uint32_t size)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int i;

	/* userspace palettes are always correct as is */
	for (i = 0; i < size; i++) {
		amdgpu_crtc->lut_r[i] = red[i] >> 6;
		amdgpu_crtc->lut_g[i] = green[i] >> 6;
		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
	}
	dce_v6_0_crtc_load_lut(crtc);

	return 0;
}

static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}

static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
	.cursor_move = dce_v6_0_crtc_cursor_move,
	.gamma_set = dce_v6_0_crtc_gamma_set,
	.set_config = amdgpu_crtc_set_config,
	.destroy = dce_v6_0_crtc_destroy,
	.page_flip_target = amdgpu_crtc_page_flip_target,
};

static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
		dce_v6_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
		if (amdgpu_crtc->enabled)
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}

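/*
 * prepare()/commit() bracket a mode set: power gating is disabled and
 * the CRTC is locked and turned off before programming, then turned
 * back on and unlocked once the new timing is in place.
 */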
static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}

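/*
 * Full CRTC teardown: unpin the primary framebuffer, disable the GRPH
 * block, re-enable power gating and release the PPLL unless another
 * enabled CRTC still shares it.
 */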
static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_atom_ss ss;
	int i;

	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_framebuffer *amdgpu_fb;
		struct amdgpu_bo *abo;

		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, false);
		if (unlikely(r))
			DRM_ERROR("failed to reserve abo before unpin\n");
		else {
			amdgpu_bo_unpin(abo);
			amdgpu_bo_unreserve(abo);
		}
	}
	/* disable the GRPH */
	dce_v6_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll; don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}

static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v6_0_cursor_reset(crtc);
	/* update the hw version for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}

static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}

static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
}

static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
	.dpms = dce_v6_0_crtc_dpms,
	.mode_fixup = dce_v6_0_crtc_mode_fixup,
	.mode_set = dce_v6_0_crtc_mode_set,
	.mode_set_base = dce_v6_0_crtc_set_base,
	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
	.prepare = dce_v6_0_crtc_prepare,
	.commit = dce_v6_0_crtc_commit,
	.load_lut = dce_v6_0_crtc_load_lut,
	.disable = dce_v6_0_crtc_disable,
};

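/*
 * Allocate and register one amdgpu CRTC: initialize the legacy gamma
 * table, cursor limits and per-CRTC register offset, then hook up the
 * helper funcs above.
 */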
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
	struct amdgpu_crtc *amdgpu_crtc;
	int i;

	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;
	adev->mode_info.crtcs[index] = amdgpu_crtc;

	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	for (i = 0; i < 256; i++) {
		amdgpu_crtc->lut_r[i] = i << 2;
		amdgpu_crtc->lut_g[i] = i << 2;
		amdgpu_crtc->lut_b[i] = i << 2;
	}

	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);

	return 0;
}

static int dce_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;

	dce_v6_0_set_display_funcs(adev);
	dce_v6_0_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

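/*
 * Interrupt sources on SI: src_ids 1-6 are the per-CRTC vblank/vline
 * interrupts, the even src_ids 8-18 are the per-CRTC pageflip
 * interrupts, and src_id 42 covers hotplug detect.
 */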
static int dce_v6_0_sw_init(void *handle)
{
	int r, i;
	bool ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
		if (r)
			return r;
	}

	for (i = 8; i < 20; i += 2) {
		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
		if (r)
			return r;
	}

	/* HPD hotplug */
	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
	if (r)
		return r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
	adev->ddev->mode_config.async_page_flip = true;
	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;
	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	adev->ddev->mode_config.fb_base = adev->mc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = dce_v6_0_crtc_init(adev, i);
		if (r)
			return r;
	}

	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
	if (ret)
		amdgpu_print_display_setup(adev->ddev);
	else
		return -EINVAL;

	/* setup afmt */
	r = dce_v6_0_afmt_init(adev);
	if (r)
		return r;

	r = dce_v6_0_audio_init(adev);
	if (r)
		return r;

	drm_kms_helper_poll_init(adev->ddev);

	return r;
}

static int dce_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	dce_v6_0_audio_fini(adev);
	dce_v6_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;

	return 0;
}

static int dce_v6_0_hw_init(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_encoder_init_dig(adev);
	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);

	/* initialize hpd */
	dce_v6_0_hpd_init(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v6_0_pageflip_interrupt_init(adev);

	return 0;
}

static int dce_v6_0_hw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_v6_0_hpd_fini(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v6_0_pageflip_interrupt_fini(adev);

	return 0;
}

static int dce_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_atombios_scratch_regs_save(adev);

	return dce_v6_0_hw_fini(handle);
}

static int dce_v6_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = dce_v6_0_hw_init(handle);

	amdgpu_atombios_scratch_regs_restore(adev);

	/* turn on the BL */
	if (adev->mode_info.bl_encoder) {
		u8 bl_level = amdgpu_display_backlight_get_level(adev,
								 adev->mode_info.bl_encoder);
		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
						   bl_level);
	}

	return ret;
}

static bool dce_v6_0_is_idle(void *handle)
{
	return true;
}

static int dce_v6_0_wait_for_idle(void *handle)
{
	return 0;
}

static int dce_v6_0_soft_reset(void *handle)
{
	DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
	return 0;
}

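/*
 * Per-CRTC interrupt enables live in the INT_MASK register of each
 * CRTC register block; the helpers below set or clear the relevant
 * mask bit for the requested state.
 */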
static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
						     int crtc,
						     enum amdgpu_interrupt_state state)
{
	u32 reg_block, interrupt_mask;

	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return;
	}

	reg_block = crtc_offsets[crtc];

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		interrupt_mask = RREG32(INT_MASK + reg_block);
		interrupt_mask &= ~VBLANK_INT_MASK;
		WREG32(INT_MASK + reg_block, interrupt_mask);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		interrupt_mask = RREG32(INT_MASK + reg_block);
		interrupt_mask |= VBLANK_INT_MASK;
		WREG32(INT_MASK + reg_block, interrupt_mask);
		break;
	default:
		break;
	}
}

static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
						    int crtc,
						    enum amdgpu_interrupt_state state)
{

}

static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;

	if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", type);
		return 0;
	}

	dc_hpd_int_cntl_reg = hpd_int_control_offsets[type];

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
		dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
		dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
		WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK2:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK3:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK4:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK5:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK6:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE1:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE2:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE3:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE4:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE5:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE6:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
		break;
	default:
		break;
	}
	return 0;
}

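/*
 * CRTC interrupt handler: acks the vblank/vline status bit in the
 * CRTC's register block, then forwards vblank events to DRM.
 */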
static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	switch (entry->src_data) {
	case 0: /* vblank */
		if (disp_int & interrupt_status_offsets[crtc].vblank)
			WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type)) {
			drm_handle_vblank(adev->ddev, crtc);
		}
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;
	case 1: /* vline */
		if (disp_int & interrupt_status_offsets[crtc].vline)
			WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
		break;
	default:
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *src,
						 unsigned type,
						 enum amdgpu_interrupt_state state)
{
	u32 reg;

	if (type >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}

	reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
	if (state == AMDGPU_IRQ_STATE_DISABLE)
		WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
	else
		WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

	return 0;
}

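/*
 * Pageflip interrupt handler: acks the flip-pending status, checks
 * that a flip was actually submitted, then signals the vblank event
 * and schedules the worker that unpins the old framebuffer.
 */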
static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;
	struct amdgpu_flip_work *works;

	crtc_id = (entry->src_id - 8) >> 1;

	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* IRQ could occur when in initial stage */
	if (amdgpu_crtc == NULL)
		return 0;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return 0;
	}

	/* page flip completed. clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}

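/*
 * HPD interrupt handler: acks the HPD interrupt and kicks the hotplug
 * worker, which reprobes the connectors.
 */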
static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, int_control, tmp;
	unsigned hpd;

	if (entry->src_data >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
		return 0;
	}

	hpd = entry->src_data;
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;
	int_control = hpd_int_control_offsets[hpd];

	if (disp_int & mask) {
		tmp = RREG32(int_control);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(int_control, tmp);
		schedule_work(&adev->hotplug_work);
		DRM_INFO("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

static int dce_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs dce_v6_0_ip_funcs = {
	.name = "dce_v6_0",
	.early_init = dce_v6_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v6_0_sw_init,
	.sw_fini = dce_v6_0_sw_fini,
	.hw_init = dce_v6_0_hw_init,
	.hw_fini = dce_v6_0_hw_fini,
	.suspend = dce_v6_0_suspend,
	.resume = dce_v6_0_resume,
	.is_idle = dce_v6_0_is_idle,
	.wait_for_idle = dce_v6_0_wait_for_idle,
	.soft_reset = dce_v6_0_soft_reset,
	.set_clockgating_state = dce_v6_0_set_clockgating_state,
	.set_powergating_state = dce_v6_0_set_powergating_state,
};

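/*
 * Encoder helper callbacks. mode_set needs the CRTC already assigned,
 * so the encoder is forced off here rather than in prepare(),
 * interleave is reprogrammed, and for HDMI the AFMT block is enabled
 * and configured for the adjusted mode.
 */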
static void
dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v6_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v6_0_afmt_enable(encoder, true);
		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
	}
}

static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v6_0_program_fmt(encoder);
}

static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v6_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
{

}

static void
dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{

}

static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
{

}

static void
dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
	.dpms = dce_v6_0_ext_dpms,
	.mode_fixup = dce_v6_0_ext_mode_fixup,
	.prepare = dce_v6_0_ext_prepare,
	.mode_set = dce_v6_0_ext_mode_set,
	.commit = dce_v6_0_ext_commit,
	.disable = dce_v6_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.disable = dce_v6_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
	.destroy = dce_v6_0_encoder_destroy,
};

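/*
 * Register an encoder with DRM. Encoders already known from a previous
 * call just accumulate the new supported_device bits; otherwise a new
 * amdgpu_encoder is allocated and bound to the matching helper funcs.
 */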
static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;
	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
		break;
	}
}

static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
	.set_vga_render_state = &dce_v6_0_set_vga_render_state,
	.bandwidth_update = &dce_v6_0_bandwidth_update,
	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
	.vblank_wait = &dce_v6_0_vblank_wait,
	.is_display_hung = &dce_v6_0_is_display_hung,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v6_0_hpd_sense,
	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
	.page_flip = &dce_v6_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v6_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v6_0_stop_mc_access,
	.resume_mc_access = &dce_v6_0_resume_mc_access,
};

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_v6_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
	.set = dce_v6_0_set_crtc_interrupt_state,
	.process = dce_v6_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
	.set = dce_v6_0_set_pageflip_interrupt_state,
	.process = dce_v6_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
	.set = dce_v6_0_set_hpd_interrupt_state,
	.process = dce_v6_0_hpd_irq,
};

static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}