/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 */
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
/* Link status poll timeout, in milliseconds. */
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
55 static const struct dp_link_dpll gen4_dpll[] = {
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
62 static const struct dp_link_dpll pch_dpll[] = {
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
69 static const struct dp_link_dpll vlv_dpll[] = {
71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
/*
 * Source-supported link rates in kHz, ascending. 540000 (HBR2) must stay
 * last: intel_dp_source_rates() trims it by dropping the final entry when
 * the source lacks HBR2 support.
 */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
107 static bool is_edp(struct intel_dp *intel_dp)
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
118 return intel_dig_port->base.base.dev;
121 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
/*
 * Return a 4-bit mask of the DP lanes NOT used by a @lane_count
 * configuration (lanes are always allocated from bit 0 upward).
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}
139 intel_dp_max_link_bw(struct intel_dp *intel_dp)
141 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
143 switch (max_link_bw) {
144 case DP_LINK_BW_1_62:
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
151 max_link_bw = DP_LINK_BW_1_62;
157 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
159 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
160 struct drm_device *dev = intel_dig_port->base.base.dev;
161 u8 source_max, sink_max;
164 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
165 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
168 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
170 return min(source_max, sink_max);
/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* Round up to whole decakilobits. */
	return (pixel_clock * bpp + 9) / 10;
}
/* Max payload rate in decakilobits/s: 8b/10b encoding eats 20%. */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
202 static enum drm_mode_status
203 intel_dp_mode_valid(struct drm_connector *connector,
204 struct drm_display_mode *mode)
206 struct intel_dp *intel_dp = intel_attached_dp(connector);
207 struct intel_connector *intel_connector = to_intel_connector(connector);
208 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
209 int target_clock = mode->clock;
210 int max_rate, mode_rate, max_lanes, max_link_clock;
212 if (is_edp(intel_dp) && fixed_mode) {
213 if (mode->hdisplay > fixed_mode->hdisplay)
216 if (mode->vdisplay > fixed_mode->vdisplay)
219 target_clock = fixed_mode->clock;
222 max_link_clock = intel_dp_max_link_rate(intel_dp);
223 max_lanes = intel_dp_max_lane_count(intel_dp);
225 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
226 mode_rate = intel_dp_link_required(target_clock, 18);
228 if (mode_rate > max_rate)
229 return MODE_CLOCK_HIGH;
231 if (mode->clock < 10000)
232 return MODE_CLOCK_LOW;
234 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
235 return MODE_H_ILLEGAL;
/*
 * Pack up to 4 bytes big-endian into one AUX channel data register value;
 * extra input bytes beyond 4 are ignored.
 */
static uint32_t
intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
/*
 * Unpack one AUX channel data register value into up to 4 big-endian
 * bytes; requests beyond 4 bytes are clamped.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
/* Forward declarations of the PPS init helpers defined later. */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);
268 static void pps_lock(struct intel_dp *intel_dp)
270 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
271 struct intel_encoder *encoder = &intel_dig_port->base;
272 struct drm_device *dev = encoder->base.dev;
273 struct drm_i915_private *dev_priv = dev->dev_private;
274 enum intel_display_power_domain power_domain;
277 * See vlv_power_sequencer_reset() why we need
278 * a power domain reference here.
280 power_domain = intel_display_port_power_domain(encoder);
281 intel_display_power_get(dev_priv, power_domain);
283 mutex_lock(&dev_priv->pps_mutex);
286 static void pps_unlock(struct intel_dp *intel_dp)
288 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
289 struct intel_encoder *encoder = &intel_dig_port->base;
290 struct drm_device *dev = encoder->base.dev;
291 struct drm_i915_private *dev_priv = dev->dev_private;
292 enum intel_display_power_domain power_domain;
294 mutex_unlock(&dev_priv->pps_mutex);
296 power_domain = intel_display_port_power_domain(encoder);
297 intel_display_power_put(dev_priv, power_domain);
301 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
303 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
304 struct drm_device *dev = intel_dig_port->base.base.dev;
305 struct drm_i915_private *dev_priv = dev->dev_private;
306 enum pipe pipe = intel_dp->pps_pipe;
307 bool pll_enabled, release_cl_override = false;
308 enum dpio_phy phy = DPIO_PHY(pipe);
309 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
312 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
313 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
314 pipe_name(pipe), port_name(intel_dig_port->port)))
317 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
318 pipe_name(pipe), port_name(intel_dig_port->port));
320 /* Preserve the BIOS-computed detected bit. This is
321 * supposed to be read-only.
323 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
324 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
325 DP |= DP_PORT_WIDTH(1);
326 DP |= DP_LINK_TRAIN_PAT_1;
328 if (IS_CHERRYVIEW(dev))
329 DP |= DP_PIPE_SELECT_CHV(pipe);
330 else if (pipe == PIPE_B)
331 DP |= DP_PIPEB_SELECT;
333 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
336 * The DPLL for the pipe must be enabled for this to work.
337 * So enable temporarily it if it's not already enabled.
340 release_cl_override = IS_CHERRYVIEW(dev) &&
341 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
343 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
344 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
348 * Similar magic as in intel_dp_enable_port().
349 * We _must_ do this port enable + disable trick
350 * to make this power seqeuencer lock onto the port.
351 * Otherwise even VDD force bit won't work.
353 I915_WRITE(intel_dp->output_reg, DP);
354 POSTING_READ(intel_dp->output_reg);
356 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
357 POSTING_READ(intel_dp->output_reg);
359 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
360 POSTING_READ(intel_dp->output_reg);
363 vlv_force_pll_off(dev, pipe);
365 if (release_cl_override)
366 chv_phy_powergate_ch(dev_priv, phy, ch, false);
371 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
373 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
374 struct drm_device *dev = intel_dig_port->base.base.dev;
375 struct drm_i915_private *dev_priv = dev->dev_private;
376 struct intel_encoder *encoder;
377 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
380 lockdep_assert_held(&dev_priv->pps_mutex);
382 /* We should never land here with regular DP ports */
383 WARN_ON(!is_edp(intel_dp));
385 if (intel_dp->pps_pipe != INVALID_PIPE)
386 return intel_dp->pps_pipe;
389 * We don't have power sequencer currently.
390 * Pick one that's not used by other ports.
392 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
394 struct intel_dp *tmp;
396 if (encoder->type != INTEL_OUTPUT_EDP)
399 tmp = enc_to_intel_dp(&encoder->base);
401 if (tmp->pps_pipe != INVALID_PIPE)
402 pipes &= ~(1 << tmp->pps_pipe);
406 * Didn't find one. This should not happen since there
407 * are two power sequencers and up to two eDP ports.
409 if (WARN_ON(pipes == 0))
412 pipe = ffs(pipes) - 1;
414 vlv_steal_power_sequencer(dev, pipe);
415 intel_dp->pps_pipe = pipe;
417 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
418 pipe_name(intel_dp->pps_pipe),
419 port_name(intel_dig_port->port));
421 /* init power sequencer on this pipe and port */
422 intel_dp_init_panel_power_sequencer(dev, intel_dp);
423 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
426 * Even vdd force doesn't work until we've made
427 * the power sequencer lock in on the port.
429 vlv_power_sequencer_kick(intel_dp);
431 return intel_dp->pps_pipe;
434 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
437 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
440 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
443 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
446 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
449 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
456 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
458 vlv_pipe_check pipe_check)
462 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
463 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
464 PANEL_PORT_SELECT_MASK;
466 if (port_sel != PANEL_PORT_SELECT_VLV(port))
469 if (!pipe_check(dev_priv, pipe))
479 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
481 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
482 struct drm_device *dev = intel_dig_port->base.base.dev;
483 struct drm_i915_private *dev_priv = dev->dev_private;
484 enum port port = intel_dig_port->port;
486 lockdep_assert_held(&dev_priv->pps_mutex);
488 /* try to find a pipe with this port selected */
489 /* first pick one where the panel is on */
490 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
492 /* didn't find one? pick one where vdd is on */
493 if (intel_dp->pps_pipe == INVALID_PIPE)
494 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
495 vlv_pipe_has_vdd_on);
496 /* didn't find one? pick one with just the correct port */
497 if (intel_dp->pps_pipe == INVALID_PIPE)
498 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
501 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
502 if (intel_dp->pps_pipe == INVALID_PIPE) {
503 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
508 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
509 port_name(port), pipe_name(intel_dp->pps_pipe));
511 intel_dp_init_panel_power_sequencer(dev, intel_dp);
512 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
515 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
517 struct drm_device *dev = dev_priv->dev;
518 struct intel_encoder *encoder;
520 if (WARN_ON(!IS_VALLEYVIEW(dev)))
524 * We can't grab pps_mutex here due to deadlock with power_domain
525 * mutex when power_domain functions are called while holding pps_mutex.
526 * That also means that in order to use pps_pipe the code needs to
527 * hold both a power domain reference and pps_mutex, and the power domain
528 * reference get/put must be done while _not_ holding pps_mutex.
529 * pps_{lock,unlock}() do these steps in the correct order, so one
530 * should use them always.
533 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
534 struct intel_dp *intel_dp;
536 if (encoder->type != INTEL_OUTPUT_EDP)
539 intel_dp = enc_to_intel_dp(&encoder->base);
540 intel_dp->pps_pipe = INVALID_PIPE;
544 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
546 struct drm_device *dev = intel_dp_to_dev(intel_dp);
549 return BXT_PP_CONTROL(0);
550 else if (HAS_PCH_SPLIT(dev))
551 return PCH_PP_CONTROL;
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
556 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
558 struct drm_device *dev = intel_dp_to_dev(intel_dp);
561 return BXT_PP_STATUS(0);
562 else if (HAS_PCH_SPLIT(dev))
563 return PCH_PP_STATUS;
565 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
568 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
569 This function only applicable when panel PM state is not to be tracked */
570 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
573 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
575 struct drm_device *dev = intel_dp_to_dev(intel_dp);
576 struct drm_i915_private *dev_priv = dev->dev_private;
578 if (!is_edp(intel_dp) || code != SYS_RESTART)
583 if (IS_VALLEYVIEW(dev)) {
584 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
585 u32 pp_ctrl_reg, pp_div_reg;
588 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
589 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
590 pp_div = I915_READ(pp_div_reg);
591 pp_div &= PP_REFERENCE_DIVIDER_MASK;
593 /* 0x1F write to PP_DIV_REG sets max cycle delay */
594 I915_WRITE(pp_div_reg, pp_div | 0x1F);
595 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
596 msleep(intel_dp->panel_power_cycle_delay);
599 pps_unlock(intel_dp);
604 static bool edp_have_panel_power(struct intel_dp *intel_dp)
606 struct drm_device *dev = intel_dp_to_dev(intel_dp);
607 struct drm_i915_private *dev_priv = dev->dev_private;
609 lockdep_assert_held(&dev_priv->pps_mutex);
611 if (IS_VALLEYVIEW(dev) &&
612 intel_dp->pps_pipe == INVALID_PIPE)
615 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
618 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
620 struct drm_device *dev = intel_dp_to_dev(intel_dp);
621 struct drm_i915_private *dev_priv = dev->dev_private;
623 lockdep_assert_held(&dev_priv->pps_mutex);
625 if (IS_VALLEYVIEW(dev) &&
626 intel_dp->pps_pipe == INVALID_PIPE)
629 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
633 intel_dp_check_edp(struct intel_dp *intel_dp)
635 struct drm_device *dev = intel_dp_to_dev(intel_dp);
636 struct drm_i915_private *dev_priv = dev->dev_private;
638 if (!is_edp(intel_dp))
641 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
642 WARN(1, "eDP powered off while attempting aux channel communication.\n");
643 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
644 I915_READ(_pp_stat_reg(intel_dp)),
645 I915_READ(_pp_ctrl_reg(intel_dp)));
650 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
652 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
653 struct drm_device *dev = intel_dig_port->base.base.dev;
654 struct drm_i915_private *dev_priv = dev->dev_private;
655 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
659 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
661 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
662 msecs_to_jiffies_timeout(10));
664 done = wait_for_atomic(C, 10) == 0;
666 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
673 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
675 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
676 struct drm_device *dev = intel_dig_port->base.base.dev;
679 * The clock divider is based off the hrawclk, and would like to run at
680 * 2MHz. So, take the hrawclk value and divide by 2 and use that
682 return index ? 0 : intel_hrawclk(dev) / 2;
685 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
687 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
688 struct drm_device *dev = intel_dig_port->base.base.dev;
689 struct drm_i915_private *dev_priv = dev->dev_private;
694 if (intel_dig_port->port == PORT_A) {
695 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
698 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
702 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
704 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
705 struct drm_device *dev = intel_dig_port->base.base.dev;
706 struct drm_i915_private *dev_priv = dev->dev_private;
708 if (intel_dig_port->port == PORT_A) {
711 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
712 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
713 /* Workaround for non-ULT HSW */
720 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
/* VLV/CHV: single fixed AUX clock divider of 100. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}
739 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
742 uint32_t aux_clock_divider)
744 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
745 struct drm_device *dev = intel_dig_port->base.base.dev;
746 uint32_t precharge, timeout;
753 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
754 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
756 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
758 return DP_AUX_CH_CTL_SEND_BUSY |
760 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
761 DP_AUX_CH_CTL_TIME_OUT_ERROR |
763 DP_AUX_CH_CTL_RECEIVE_ERROR |
764 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
765 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
766 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
769 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
774 return DP_AUX_CH_CTL_SEND_BUSY |
776 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
777 DP_AUX_CH_CTL_TIME_OUT_ERROR |
778 DP_AUX_CH_CTL_TIME_OUT_1600us |
779 DP_AUX_CH_CTL_RECEIVE_ERROR |
780 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
781 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
785 intel_dp_aux_ch(struct intel_dp *intel_dp,
786 const uint8_t *send, int send_bytes,
787 uint8_t *recv, int recv_size)
789 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
790 struct drm_device *dev = intel_dig_port->base.base.dev;
791 struct drm_i915_private *dev_priv = dev->dev_private;
792 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
793 uint32_t ch_data = ch_ctl + 4;
794 uint32_t aux_clock_divider;
795 int i, ret, recv_bytes;
798 bool has_aux_irq = HAS_AUX_IRQ(dev);
804 * We will be called with VDD already enabled for dpcd/edid/oui reads.
805 * In such cases we want to leave VDD enabled and it's up to upper layers
806 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
809 vdd = edp_panel_vdd_on(intel_dp);
811 /* dp aux is extremely sensitive to irq latency, hence request the
812 * lowest possible wakeup latency and so prevent the cpu from going into
815 pm_qos_update_request(&dev_priv->pm_qos, 0);
817 intel_dp_check_edp(intel_dp);
819 intel_aux_display_runtime_get(dev_priv);
821 /* Try to wait for any previous AUX channel activity */
822 for (try = 0; try < 3; try++) {
823 status = I915_READ_NOTRACE(ch_ctl);
824 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
830 static u32 last_status = -1;
831 const u32 status = I915_READ(ch_ctl);
833 if (status != last_status) {
834 WARN(1, "dp_aux_ch not started status 0x%08x\n",
836 last_status = status;
843 /* Only 5 data registers! */
844 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
849 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
850 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
855 /* Must try at least 3 times according to DP spec */
856 for (try = 0; try < 5; try++) {
857 /* Load the send data into the aux channel data registers */
858 for (i = 0; i < send_bytes; i += 4)
859 I915_WRITE(ch_data + i,
860 intel_dp_pack_aux(send + i,
863 /* Send the command and wait for it to complete */
864 I915_WRITE(ch_ctl, send_ctl);
866 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
868 /* Clear done status and any errors */
872 DP_AUX_CH_CTL_TIME_OUT_ERROR |
873 DP_AUX_CH_CTL_RECEIVE_ERROR);
875 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
878 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
879 * 400us delay required for errors and timeouts
880 * Timeout errors from the HW already meet this
881 * requirement so skip to next iteration
883 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
884 usleep_range(400, 500);
887 if (status & DP_AUX_CH_CTL_DONE)
892 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
893 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
899 /* Check for timeout or receive error.
900 * Timeouts occur when the sink is not connected
902 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
903 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
908 /* Timeouts occur when the device isn't connected, so they're
909 * "normal" -- don't fill the kernel log with these */
910 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
911 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
916 /* Unload any bytes sent back from the other side */
917 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
918 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
919 if (recv_bytes > recv_size)
920 recv_bytes = recv_size;
922 for (i = 0; i < recv_bytes; i += 4)
923 intel_dp_unpack_aux(I915_READ(ch_data + i),
924 recv + i, recv_bytes - i);
928 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
929 intel_aux_display_runtime_put(dev_priv);
932 edp_panel_vdd_off(intel_dp, false);
934 pps_unlock(intel_dp);
939 #define BARE_ADDRESS_SIZE 3
940 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
942 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
944 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
945 uint8_t txbuf[20], rxbuf[20];
946 size_t txsize, rxsize;
949 txbuf[0] = (msg->request << 4) |
950 ((msg->address >> 16) & 0xf);
951 txbuf[1] = (msg->address >> 8) & 0xff;
952 txbuf[2] = msg->address & 0xff;
953 txbuf[3] = msg->size - 1;
955 switch (msg->request & ~DP_AUX_I2C_MOT) {
956 case DP_AUX_NATIVE_WRITE:
957 case DP_AUX_I2C_WRITE:
958 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
959 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
960 rxsize = 2; /* 0 or 1 data bytes */
962 if (WARN_ON(txsize > 20))
965 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
967 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
969 msg->reply = rxbuf[0] >> 4;
972 /* Number of bytes written in a short write. */
973 ret = clamp_t(int, rxbuf[1], 0, msg->size);
975 /* Return payload size. */
981 case DP_AUX_NATIVE_READ:
982 case DP_AUX_I2C_READ:
983 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
984 rxsize = msg->size + 1;
986 if (WARN_ON(rxsize > 20))
989 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
991 msg->reply = rxbuf[0] >> 4;
993 * Assume happy day, and copy the data. The caller is
994 * expected to check msg->reply before touching it.
996 * Return payload size.
999 memcpy(msg->buffer, rxbuf + 1, ret);
1012 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1014 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1015 struct drm_i915_private *dev_priv = dev->dev_private;
1016 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1017 enum port port = intel_dig_port->port;
1018 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1019 const char *name = NULL;
1020 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1023 /* On SKL we don't have Aux for port E so we rely on VBT to set
1024 * a proper alternate aux channel.
1026 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && port == PORT_E) {
1027 switch (info->alternate_aux_channel) {
1029 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1032 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1035 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1039 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1045 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1049 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1053 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1057 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1061 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1069 * The AUX_CTL register is usually DP_CTL + 0x10.
1071 * On Haswell and Broadwell though:
1072 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1073 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1075 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1077 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1078 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1080 intel_dp->aux.name = name;
1081 intel_dp->aux.dev = dev->dev;
1082 intel_dp->aux.transfer = intel_dp_aux_transfer;
1084 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1085 connector->base.kdev->kobj.name);
1087 ret = drm_dp_aux_register(&intel_dp->aux);
1089 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1094 ret = sysfs_create_link(&connector->base.kdev->kobj,
1095 &intel_dp->aux.ddc.dev.kobj,
1096 intel_dp->aux.ddc.dev.kobj.name);
1098 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1099 drm_dp_aux_unregister(&intel_dp->aux);
1104 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1106 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1108 if (!intel_connector->mst_port)
1109 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1110 intel_dp->aux.ddc.dev.kobj.name);
1111 intel_connector_unregister(intel_connector);
1115 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1119 memset(&pipe_config->dpll_hw_state, 0,
1120 sizeof(pipe_config->dpll_hw_state));
1122 pipe_config->ddi_pll_sel = SKL_DPLL0;
1123 pipe_config->dpll_hw_state.cfgcr1 = 0;
1124 pipe_config->dpll_hw_state.cfgcr2 = 0;
1126 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1127 switch (pipe_config->port_clock / 2) {
1129 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1133 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1137 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1141 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1144 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1145 results in CDCLK change. Need to handle the change of CDCLK by
1146 disabling pipes and re-enabling them */
1148 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1152 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1157 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1161 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1163 memset(&pipe_config->dpll_hw_state, 0,
1164 sizeof(pipe_config->dpll_hw_state));
1166 switch (pipe_config->port_clock / 2) {
1168 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1171 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1174 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1180 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1182 if (intel_dp->num_sink_rates) {
1183 *sink_rates = intel_dp->sink_rates;
1184 return intel_dp->num_sink_rates;
1187 *sink_rates = default_rates;
1189 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1192 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1194 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1195 struct drm_device *dev = dig_port->base.base.dev;
1197 /* WaDisableHBR2:skl */
1198 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1201 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1202 (INTEL_INFO(dev)->gen >= 9))
1209 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1211 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1212 struct drm_device *dev = dig_port->base.base.dev;
1215 if (IS_BROXTON(dev)) {
1216 *source_rates = bxt_rates;
1217 size = ARRAY_SIZE(bxt_rates);
1218 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1219 *source_rates = skl_rates;
1220 size = ARRAY_SIZE(skl_rates);
1222 *source_rates = default_rates;
1223 size = ARRAY_SIZE(default_rates);
1226 /* This depends on the fact that 5.4 is last value in the array */
1227 if (!intel_dp_source_supports_hbr2(intel_dp))
/*
 * Pick a fixed DPLL divisor for the requested DP port clock on
 * pre-DDI platforms. Each platform family has a small table mapping
 * link clock -> divider values; on a match the dividers are copied
 * into pipe_config->dpll and clock_set is flagged.
 */
1234 intel_dp_set_clock(struct intel_encoder *encoder,
1235 		struct intel_crtc_state *pipe_config)
1237 	struct drm_device *dev = encoder->base.dev;
1238 	const struct dp_link_dpll *divisor = NULL;
1242 		divisor = gen4_dpll;
1243 		count = ARRAY_SIZE(gen4_dpll);
1244 	} else if (HAS_PCH_SPLIT(dev)) {
1246 		count = ARRAY_SIZE(pch_dpll);
1247 	} else if (IS_CHERRYVIEW(dev)) {
1249 		count = ARRAY_SIZE(chv_dpll);
1250 	} else if (IS_VALLEYVIEW(dev)) {
1252 		count = ARRAY_SIZE(vlv_dpll);
	/* Linear scan is fine: the tables hold only a handful of entries. */
1255 	if (divisor && count) {
1256 		for (i = 0; i < count; i++) {
1257 			if (pipe_config->port_clock == divisor[i].clock) {
1258 				pipe_config->dpll = divisor[i].dpll;
1259 				pipe_config->clock_set = true;
/*
 * Merge-intersect two link-rate arrays (both assumed sorted ascending)
 * into common_rates, capped at DP_MAX_SUPPORTED_RATES entries.
 * Returns the number of common entries (return path elided here).
 */
1266 static int intersect_rates(const int *source_rates, int source_len,
1267 			   const int *sink_rates, int sink_len,
1270 	int i = 0, j = 0, k = 0;
1272 	while (i < source_len && j < sink_len) {
1273 		if (source_rates[i] == sink_rates[j]) {
1274 			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1276 			common_rates[k] = source_rates[i];
1280 		} else if (source_rates[i] < sink_rates[j]) {
/*
 * Compute the set of link rates supported by BOTH source and sink,
 * storing them in the caller-provided array. Returns the count.
 */
1289 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1292 	const int *source_rates, *sink_rates;
1293 	int source_len, sink_len;
1295 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1296 	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1298 	return intersect_rates(source_rates, source_len,
1299 			       sink_rates, sink_len,
/*
 * Format an int array as a comma-separated list into str (debug aid).
 * Writes up to len bytes; str/len advance handling is elided here.
 */
1303 static void snprintf_int_array(char *str, size_t len,
1304 			       const int *array, int nelem)
1310 	for (i = 0; i < nelem; i++) {
		/* ", " separator before every element except the first */
1311 		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
/*
 * Debug-log the source, sink and common link-rate tables. Skipped
 * entirely unless KMS debugging is enabled, since the formatting
 * work is pointless otherwise.
 */
1319 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1321 	const int *source_rates, *sink_rates;
1322 	int source_len, sink_len, common_len;
1323 	int common_rates[DP_MAX_SUPPORTED_RATES];
1324 	char str[128]; /* FIXME: too big for stack? */
1326 	if ((drm_debug & DRM_UT_KMS) == 0)
1329 	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1330 	snprintf_int_array(str, sizeof(str), source_rates, source_len);
1331 	DRM_DEBUG_KMS("source rates: %s\n", str);
1333 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1334 	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1335 	DRM_DEBUG_KMS("sink rates: %s\n", str);
1337 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1338 	snprintf_int_array(str, sizeof(str), common_rates, common_len);
1339 	DRM_DEBUG_KMS("common rates: %s\n", str);
/*
 * Find the index of 'find' in a rates array of up to
 * DP_MAX_SUPPORTED_RATES entries (return statements elided here).
 * NOTE(review): callers appear to use find==0 to get the entry count —
 * TODO confirm the elided sentinel handling.
 */
1342 static int rate_to_index(int find, const int *rates)
1346 	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1347 		if (find == rates[i])
/*
 * Return the highest link rate common to source and sink.
 * rate_to_index(0, rates) yields the count of valid entries, so the
 * last (largest) common rate is at index count-1.
 */
1354 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1356 	int rates[DP_MAX_SUPPORTED_RATES] = {};
1359 	len = intel_dp_common_rates(intel_dp, rates);
1360 	if (WARN_ON(len <= 0))
1363 	return rates[rate_to_index(0, rates) - 1];
/* Translate a link rate into the sink's DP_LINK_RATE_SET table index. */
1366 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1368 	return rate_to_index(rate, intel_dp->sink_rates);
/*
 * Convert a port clock into the pair of values written to the sink
 * during link training: either a rate-select index (sinks with an
 * explicit DPCD rate table) or a classic link-bw code.
 */
1371 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1372 			   uint8_t *link_bw, uint8_t *rate_select)
1374 	if (intel_dp->num_sink_rates) {
1377 			intel_dp_rate_select(intel_dp, port_clock);
1379 		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
/*
 * Main DP mode-set computation: choose bpp, lane count and link clock
 * for the requested mode, then derive M/N values and the PLL setup.
 *
 * Strategy: walk bpp downwards (steps of one bpc across 3 channels),
 * and for each bpp walk clock and lane count upwards, taking the first
 * combination whose link bandwidth covers the mode's data rate. eDP
 * panels pin min == max for both clock and lanes since they usually
 * support exactly one configuration.
 */
1385 intel_dp_compute_config(struct intel_encoder *encoder,
1386 			struct intel_crtc_state *pipe_config)
1388 	struct drm_device *dev = encoder->base.dev;
1389 	struct drm_i915_private *dev_priv = dev->dev_private;
1390 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1391 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1392 	enum port port = dp_to_dig_port(intel_dp)->port;
1393 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1394 	struct intel_connector *intel_connector = intel_dp->attached_connector;
1395 	int lane_count, clock;
1396 	int min_lane_count = 1;
1397 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1398 	/* Conveniently, the link BW constants become indices with a shift...*/
1402 	int link_avail, link_clock;
1403 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1405 	uint8_t link_bw, rate_select;
1407 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1409 	/* No common link rates between source and sink */
1410 	WARN_ON(common_len <= 0);
1412 	max_clock = common_len - 1;
	/* Pre-DDI PCH-split platforms route everything but port A via the PCH */
1414 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1415 		pipe_config->has_pch_encoder = true;
1417 	pipe_config->has_dp_encoder = true;
1418 	pipe_config->has_drrs = false;
	/* Port A (eDP) never carries audio */
1419 	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1421 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1422 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1425 		if (INTEL_INFO(dev)->gen >= 9) {
1427 			ret = skl_update_scaler_crtc(pipe_config);
1432 		if (HAS_GMCH_DISPLAY(dev))
1433 			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1434 						 intel_connector->panel.fitting_mode);
1436 			intel_pch_panel_fitting(intel_crtc, pipe_config,
1437 						intel_connector->panel.fitting_mode);
	/* Double-clocked modes are not supported on DP */
1440 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1443 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1444 		      "max bw %d pixel clock %iKHz\n",
1445 		      max_lane_count, common_rates[max_clock],
1446 		      adjusted_mode->crtc_clock);
1448 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1449 	 * bpc in between. */
1450 	bpp = pipe_config->pipe_bpp;
1451 	if (is_edp(intel_dp)) {
1453 		/* Get bpp from vbt only for panels that dont have bpp in edid */
1454 		if (intel_connector->base.display_info.bpc == 0 &&
1455 		    (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1456 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1457 				      dev_priv->vbt.edp_bpp);
1458 			bpp = dev_priv->vbt.edp_bpp;
1462 		 * Use the maximum clock and number of lanes the eDP panel
1463 		 * advertizes being capable of. The panels are generally
1464 		 * designed to support only a single clock and lane
1465 		 * configuration, and typically these values correspond to the
1466 		 * native resolution of the panel.
1468 		min_lane_count = max_lane_count;
1469 		min_clock = max_clock;
	/* 6 bpc (18 bpp) is the DP minimum; step one bpc (2*3 bpp) at a time */
1472 	for (; bpp >= 6*3; bpp -= 2*3) {
1473 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1476 		for (clock = min_clock; clock <= max_clock; clock++) {
1477 			for (lane_count = min_lane_count;
1478 			     lane_count <= max_lane_count;
1481 				link_clock = common_rates[clock];
1482 				link_avail = intel_dp_max_data_rate(link_clock,
1485 				if (mode_rate <= link_avail) {
1495 	if (intel_dp->color_range_auto) {
1498 		 * CEA-861-E - 5.1 Default Encoding Parameters
1499 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1501 		pipe_config->limited_color_range =
1502 			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1504 		pipe_config->limited_color_range =
1505 			intel_dp->limited_color_range;
1508 	pipe_config->lane_count = lane_count;
1510 	pipe_config->pipe_bpp = bpp;
1511 	pipe_config->port_clock = common_rates[clock];
1513 	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1514 			      &link_bw, &rate_select);
1516 	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1517 		      link_bw, rate_select, pipe_config->lane_count,
1518 		      pipe_config->port_clock, bpp);
1519 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1520 		      mode_rate, link_avail);
1522 	intel_link_compute_m_n(bpp, lane_count,
1523 			       adjusted_mode->crtc_clock,
1524 			       pipe_config->port_clock,
1525 			       &pipe_config->dp_m_n);
	/* Second M/N set for seamless DRRS downclocking, if the panel has one */
1527 	if (intel_connector->panel.downclock_mode != NULL &&
1528 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1529 			pipe_config->has_drrs = true;
1530 			intel_link_compute_m_n(bpp, lane_count,
1531 				intel_connector->panel.downclock_mode->clock,
1532 				pipe_config->port_clock,
1533 				&pipe_config->dp_m2_n2);
	/* PLL selection diverges per platform; BXT handles it in the DDI code */
1536 	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1537 		skl_edp_set_pll_config(pipe_config);
1538 	else if (IS_BROXTON(dev))
1539 		/* handled in ddi */;
1540 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1541 		hsw_dp_set_ddi_pll_sel(pipe_config);
1543 		intel_dp_set_clock(encoder, pipe_config);
/*
 * Program the CPU eDP PLL frequency select bits in DP_A for the
 * configured port clock: 162 MHz for a 1.62 GHz link, otherwise
 * 270 MHz.
 */
1548 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1550 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1551 	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1552 	struct drm_device *dev = crtc->base.dev;
1553 	struct drm_i915_private *dev_priv = dev->dev_private;
1555 	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1556 		      crtc->config->port_clock);
1558 	intel_dp->DP &= ~DP_PLL_FREQ_MASK;
1560 	if (crtc->config->port_clock == 162000)
1561 		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
1563 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1565 	I915_WRITE(DP_A, intel_dp->DP);
/* Cache the negotiated link rate and lane count from the crtc state. */
1570 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1571 			      const struct intel_crtc_state *pipe_config)
1573 	intel_dp->link_rate = pipe_config->port_clock;
1574 	intel_dp->lane_count = pipe_config->lane_count;
/*
 * Build the DP port control register value (intel_dp->DP) for the
 * upcoming enable, handling the three register layouts: gen7 port A
 * (CPU eDP), CPT PCH ports (sync/enh-framing live in TRANS_DP_CTL),
 * and the common IBX/CPU/VLV/CHV layout.
 */
1577 static void intel_dp_prepare(struct intel_encoder *encoder)
1579 	struct drm_device *dev = encoder->base.dev;
1580 	struct drm_i915_private *dev_priv = dev->dev_private;
1581 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1582 	enum port port = dp_to_dig_port(intel_dp)->port;
1583 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1584 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1586 	intel_dp_set_link_params(intel_dp, crtc->config);
1589 	 * There are four kinds of DP registers:
1596 	 * IBX PCH and CPU are the same for almost everything,
1597 	 * except that the CPU DP PLL is configured in this
1600 	 * CPT PCH is quite different, having many bits moved
1601 	 * to the TRANS_DP_CTL register instead. That
1602 	 * configuration happens (oddly) in ironlake_pch_enable
1605 	/* Preserve the BIOS-computed detected bit. This is
1606 	 * supposed to be read-only.
1608 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1610 	/* Handle DP bits in common between all three register formats */
1611 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1612 	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1614 	/* Split out the IBX/CPU vs CPT settings */
1616 	if (IS_GEN7(dev) && port == PORT_A) {
1617 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1618 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1619 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1620 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1621 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1623 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1624 			intel_dp->DP |= DP_ENHANCED_FRAMING;
		/* Pipe select for gen7 port A lives at bit 29 */
1626 		intel_dp->DP |= crtc->pipe << 29;
1627 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1630 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
		/* On CPT, enhanced framing is a transcoder bit, not a port bit */
1632 		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1633 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1634 			trans_dp |= TRANS_DP_ENH_FRAMING;
1636 			trans_dp &= ~TRANS_DP_ENH_FRAMING;
1637 		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1639 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1640 		    crtc->config->limited_color_range)
1641 			intel_dp->DP |= DP_COLOR_RANGE_16_235;
1643 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1644 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1645 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1646 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1647 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
1649 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1650 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1652 		if (IS_CHERRYVIEW(dev))
1653 			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1654 		else if (crtc->pipe == PIPE_B)
1655 			intel_dp->DP |= DP_PIPEB_SELECT;
/*
 * Mask/value pairs for wait_panel_status(): the PP_STATUS bit patterns
 * that indicate the panel power sequencer has settled into the
 * "on", "off" and "power-cycle complete" idle states respectively.
 */
1659 #define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1660 #define IDLE_ON_VALUE  	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1662 #define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1663 #define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1665 #define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1666 #define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
/*
 * Poll PP_STATUS until (status & mask) == value, with a 5 s timeout and
 * 10 ms poll interval; logs an error on timeout rather than failing.
 * Caller must hold pps_mutex.
 */
1668 static void wait_panel_status(struct intel_dp *intel_dp,
1672 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1673 	struct drm_i915_private *dev_priv = dev->dev_private;
1674 	u32 pp_stat_reg, pp_ctrl_reg;
1676 	lockdep_assert_held(&dev_priv->pps_mutex);
1678 	pp_stat_reg = _pp_stat_reg(intel_dp);
1679 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1681 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1683 			I915_READ(pp_stat_reg),
1684 			I915_READ(pp_ctrl_reg));
1686 	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1687 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1688 				I915_READ(pp_stat_reg),
1689 				I915_READ(pp_ctrl_reg));
1692 	DRM_DEBUG_KMS("Wait complete\n");
/* Block until the power sequencer reports the panel fully on. */
1695 static void wait_panel_on(struct intel_dp *intel_dp)
1697 	DRM_DEBUG_KMS("Wait for panel power on\n");
1698 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
/* Block until the power sequencer reports the panel fully off. */
1701 static void wait_panel_off(struct intel_dp *intel_dp)
1703 	DRM_DEBUG_KMS("Wait for panel power off time\n");
1704 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
/*
 * Honour the panel's minimum power-cycle delay: wait out the remaining
 * time since the last power-off, then wait for the sequencer to report
 * the cycle-complete idle state.
 */
1707 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1709 	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1711 	/* When we disable the VDD override bit last we have to do the manual
1713 	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1714 				       intel_dp->panel_power_cycle_delay);
1716 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
/* Enforce the panel's power-on -> backlight-on delay. */
1719 static void wait_backlight_on(struct intel_dp *intel_dp)
1721 	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1722 				       intel_dp->backlight_on_delay);
/* Enforce the backlight-off -> panel-power-off delay. */
1725 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1727 	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1728 				       intel_dp->backlight_off_delay);
1731 /* Read the current pp_control value, unlocking the register if it
 *
 * On everything except BXT the write-protect field is also forced to
 * the unlock key so subsequent writes take effect.
 * Caller must hold pps_mutex.
 */
1735 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1737 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1738 	struct drm_i915_private *dev_priv = dev->dev_private;
1741 	lockdep_assert_held(&dev_priv->pps_mutex);
1743 	control = I915_READ(_pp_ctrl_reg(intel_dp));
1744 	if (!IS_BROXTON(dev)) {
1745 		control &= ~PANEL_UNLOCK_MASK;
1746 		control |= PANEL_UNLOCK_REGS;
/*
 * Force panel VDD on so the AUX channel can be used before full panel
 * power-up. Returns whether the caller is responsible for the matching
 * VDD-off (false if VDD was already forced on by someone else).
 */
1752  * Must be paired with edp_panel_vdd_off().
1753  * Must hold pps_mutex around the whole on/off sequence.
1754  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1756 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1758 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1759 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1760 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1761 	struct drm_i915_private *dev_priv = dev->dev_private;
1762 	enum intel_display_power_domain power_domain;
1764 	u32 pp_stat_reg, pp_ctrl_reg;
1765 	bool need_to_disable = !intel_dp->want_panel_vdd;
1767 	lockdep_assert_held(&dev_priv->pps_mutex);
1769 	if (!is_edp(intel_dp))
	/* Cancel any pending deferred VDD-off now that VDD is wanted again */
1772 	cancel_delayed_work(&intel_dp->panel_vdd_work);
1773 	intel_dp->want_panel_vdd = true;
1775 	if (edp_have_panel_vdd(intel_dp))
1776 		return need_to_disable;
	/* Hold a power-domain reference for as long as VDD is forced */
1778 	power_domain = intel_display_port_power_domain(intel_encoder);
1779 	intel_display_power_get(dev_priv, power_domain);
1781 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1782 		      port_name(intel_dig_port->port));
1784 	if (!edp_have_panel_power(intel_dp))
1785 		wait_panel_power_cycle(intel_dp);
1787 	pp = ironlake_get_pp_control(intel_dp);
1788 	pp |= EDP_FORCE_VDD;
1790 	pp_stat_reg = _pp_stat_reg(intel_dp);
1791 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1793 	I915_WRITE(pp_ctrl_reg, pp);
1794 	POSTING_READ(pp_ctrl_reg);
1795 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1796 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1798 	 * If the panel wasn't on, delay before accessing aux channel
1800 	if (!edp_have_panel_power(intel_dp)) {
1801 		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1802 			      port_name(intel_dig_port->port));
1803 		msleep(intel_dp->panel_power_up_delay);
1806 	return need_to_disable;
/*
 * Public, self-locking wrapper around edp_panel_vdd_on(): takes
 * pps_mutex internally and warns if VDD was already requested.
 */
1810  * Must be paired with intel_edp_panel_vdd_off() or
1811  * intel_edp_panel_off().
1812  * Nested calls to these functions are not allowed since
1813  * we drop the lock. Caller must use some higher level
1814  * locking to prevent nested calls from other threads.
1816 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1820 	if (!is_edp(intel_dp))
1824 	vdd = edp_panel_vdd_on(intel_dp);
1825 	pps_unlock(intel_dp);
1827 	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1828 	     port_name(dp_to_dig_port(intel_dp)->port));
/*
 * Actually drop the forced VDD: clear EDP_FORCE_VDD, record the power
 * cycle timestamp if the panel is now fully off, and release the power
 * domain taken in edp_panel_vdd_on(). Caller must hold pps_mutex and
 * must have cleared want_panel_vdd first.
 */
1831 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1833 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1834 	struct drm_i915_private *dev_priv = dev->dev_private;
1835 	struct intel_digital_port *intel_dig_port =
1836 		dp_to_dig_port(intel_dp);
1837 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1838 	enum intel_display_power_domain power_domain;
1840 	u32 pp_stat_reg, pp_ctrl_reg;
1842 	lockdep_assert_held(&dev_priv->pps_mutex);
1844 	WARN_ON(intel_dp->want_panel_vdd);
1846 	if (!edp_have_panel_vdd(intel_dp))
1849 	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1850 		      port_name(intel_dig_port->port));
1852 	pp = ironlake_get_pp_control(intel_dp);
1853 	pp &= ~EDP_FORCE_VDD;
1855 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1856 	pp_stat_reg = _pp_stat_reg(intel_dp);
1858 	I915_WRITE(pp_ctrl_reg, pp);
1859 	POSTING_READ(pp_ctrl_reg);
1861 	/* Make sure sequencer is idle before allowing subsequent activity */
1862 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1863 	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/* Panel power was also off: this counts as a full power cycle */
1865 	if ((pp & POWER_TARGET_ON) == 0)
1866 		intel_dp->last_power_cycle = jiffies;
1868 	power_domain = intel_display_port_power_domain(intel_encoder);
1869 	intel_display_power_put(dev_priv, power_domain);
/*
 * Delayed-work handler that drops VDD after the grace period, unless
 * someone re-requested it (want_panel_vdd) in the meantime.
 */
1872 static void edp_panel_vdd_work(struct work_struct *__work)
1874 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1875 						 struct intel_dp, panel_vdd_work);
1878 	if (!intel_dp->want_panel_vdd)
1879 		edp_panel_vdd_off_sync(intel_dp);
1880 	pps_unlock(intel_dp);
/*
 * Schedule the deferred VDD-off far in the future (5x the power-cycle
 * delay) so bursts of AUX traffic don't thrash panel power.
 */
1883 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1885 	unsigned long delay;
1888 	 * Queue the timer to fire a long time from now (relative to the power
1889 	 * down delay) to keep the panel power up across a sequence of
1892 	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1893 	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
/*
 * Release a VDD reference: either drop it synchronously or via the
 * deferred worker, depending on 'sync'. Warns if VDD was not forced on.
 */
1897  * Must be paired with edp_panel_vdd_on().
1898  * Must hold pps_mutex around the whole on/off sequence.
1899  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1901 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1903 	struct drm_i915_private *dev_priv =
1904 		intel_dp_to_dev(intel_dp)->dev_private;
1906 	lockdep_assert_held(&dev_priv->pps_mutex);
1908 	if (!is_edp(intel_dp))
1911 	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1912 	     port_name(dp_to_dig_port(intel_dp)->port));
1914 	intel_dp->want_panel_vdd = false;
1917 		edp_panel_vdd_off_sync(intel_dp);
1919 		edp_panel_vdd_schedule_off(intel_dp);
/*
 * Run the panel power-on sequence: wait out the power cycle, apply the
 * ILK reset workaround, set POWER_TARGET_ON, wait for the sequencer to
 * settle, then restore the panel reset bit. Caller holds pps_mutex.
 */
1922 static void edp_panel_on(struct intel_dp *intel_dp)
1924 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1925 	struct drm_i915_private *dev_priv = dev->dev_private;
1929 	lockdep_assert_held(&dev_priv->pps_mutex);
1931 	if (!is_edp(intel_dp))
1934 	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1935 		      port_name(dp_to_dig_port(intel_dp)->port));
1937 	if (WARN(edp_have_panel_power(intel_dp),
1938 		 "eDP port %c panel power already on\n",
1939 		 port_name(dp_to_dig_port(intel_dp)->port)))
1942 	wait_panel_power_cycle(intel_dp);
1944 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1945 	pp = ironlake_get_pp_control(intel_dp);
1947 		/* ILK workaround: disable reset around power sequence */
1948 		pp &= ~PANEL_POWER_RESET;
1949 		I915_WRITE(pp_ctrl_reg, pp);
1950 		POSTING_READ(pp_ctrl_reg);
1953 	pp |= POWER_TARGET_ON;
1955 		pp |= PANEL_POWER_RESET;
1957 	I915_WRITE(pp_ctrl_reg, pp);
1958 	POSTING_READ(pp_ctrl_reg);
1960 	wait_panel_on(intel_dp);
1961 	intel_dp->last_power_on = jiffies;
1964 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1965 		I915_WRITE(pp_ctrl_reg, pp);
1966 		POSTING_READ(pp_ctrl_reg);
/* Self-locking wrapper: take pps_mutex and power the panel on. */
1970 void intel_edp_panel_on(struct intel_dp *intel_dp)
1972 	if (!is_edp(intel_dp))
1976 	edp_panel_on(intel_dp);
1977 	pps_unlock(intel_dp);
/*
 * Run the panel power-off sequence. Requires that the caller currently
 * holds a VDD reference (want_panel_vdd) — panel power and forced VDD
 * are dropped together, then the VDD power-domain ref is released.
 * Caller holds pps_mutex.
 */
1981 static void edp_panel_off(struct intel_dp *intel_dp)
1983 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1984 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1985 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1986 	struct drm_i915_private *dev_priv = dev->dev_private;
1987 	enum intel_display_power_domain power_domain;
1991 	lockdep_assert_held(&dev_priv->pps_mutex);
1993 	if (!is_edp(intel_dp))
1996 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1997 		      port_name(dp_to_dig_port(intel_dp)->port));
1999 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2000 	     port_name(dp_to_dig_port(intel_dp)->port));
2002 	pp = ironlake_get_pp_control(intel_dp);
2003 	/* We need to switch off panel power _and_ force vdd, for otherwise some
2004 	 * panels get very unhappy and cease to work. */
2005 	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2008 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2010 	intel_dp->want_panel_vdd = false;
2012 	I915_WRITE(pp_ctrl_reg, pp);
2013 	POSTING_READ(pp_ctrl_reg);
2015 	intel_dp->last_power_cycle = jiffies;
2016 	wait_panel_off(intel_dp);
2018 	/* We got a reference when we enabled the VDD. */
2019 	power_domain = intel_display_port_power_domain(intel_encoder);
2020 	intel_display_power_put(dev_priv, power_domain);
/* Self-locking wrapper: take pps_mutex and power the panel off. */
2023 void intel_edp_panel_off(struct intel_dp *intel_dp)
2025 	if (!is_edp(intel_dp))
2029 	edp_panel_off(intel_dp);
2030 	pps_unlock(intel_dp);
2033 /* Enable backlight in the panel power control. */
2034 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2036 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2037 	struct drm_device *dev = intel_dig_port->base.base.dev;
2038 	struct drm_i915_private *dev_priv = dev->dev_private;
2043 	 * If we enable the backlight right away following a panel power
2044 	 * on, we may see slight flicker as the panel syncs with the eDP
2045 	 * link. So delay a bit to make sure the image is solid before
2046 	 * allowing it to appear.
2048 	wait_backlight_on(intel_dp);
	/* Set EDP_BLC_ENABLE in PP_CONTROL under pps_mutex */
2052 	pp = ironlake_get_pp_control(intel_dp);
2053 	pp |= EDP_BLC_ENABLE;
2055 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2057 	I915_WRITE(pp_ctrl_reg, pp);
2058 	POSTING_READ(pp_ctrl_reg);
2060 	pps_unlock(intel_dp);
2063 /* Enable backlight PWM and backlight PP control. */
2064 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2066 	if (!is_edp(intel_dp))
2069 	DRM_DEBUG_KMS("\n");
	/* PWM first, then the panel-power backlight enable bit */
2071 	intel_panel_enable_backlight(intel_dp->attached_connector);
2072 	_intel_edp_backlight_on(intel_dp);
2075 /* Disable backlight in the panel power control. */
2076 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2078 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2079 	struct drm_i915_private *dev_priv = dev->dev_private;
2083 	if (!is_edp(intel_dp))
	/* Clear EDP_BLC_ENABLE in PP_CONTROL under pps_mutex */
2088 	pp = ironlake_get_pp_control(intel_dp);
2089 	pp &= ~EDP_BLC_ENABLE;
2091 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2093 	I915_WRITE(pp_ctrl_reg, pp);
2094 	POSTING_READ(pp_ctrl_reg);
2096 	pps_unlock(intel_dp);
	/* Record the off time so the power-off delay can be honoured */
2098 	intel_dp->last_backlight_off = jiffies;
2099 	edp_wait_backlight_off(intel_dp);
2102 /* Disable backlight PP control and backlight PWM. */
2103 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2105 	if (!is_edp(intel_dp))
2108 	DRM_DEBUG_KMS("\n");
	/* Reverse order of enable: PP backlight bit first, then PWM */
2110 	_intel_edp_backlight_off(intel_dp);
2111 	intel_panel_disable_backlight(intel_dp->attached_connector);
/*
 * sysfs bl_power hook: compare the requested state against the current
 * EDP_BLC_ENABLE bit and only toggle when it actually changes.
 */
2115  * Hook for controlling the panel power control backlight through the bl_power
2116  * sysfs attribute. Take care to handle multiple calls.
2118 static void intel_edp_backlight_power(struct intel_connector *connector,
2121 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2125 	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2126 	pps_unlock(intel_dp);
2128 	if (is_enabled == enable)
2131 	DRM_DEBUG_KMS("panel power control backlight %s\n",
2132 		      enable ? "enable" : "disable");
2135 		_intel_edp_backlight_on(intel_dp);
2137 		_intel_edp_backlight_off(intel_dp);
/* Map a boolean hardware state to its human-readable label. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	else
		return "off";
}
/*
 * State assertion: warn if the DP port-enable bit does not match the
 * expected state.
 */
2145 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2147 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2148 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2149 	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2151 	I915_STATE_WARN(cur_state != state,
2152 			"DP port %c state assertion failure (expected %s, current %s)\n",
2153 			port_name(dig_port->port),
2154 			state_string(state), state_string(cur_state));
2156 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
/*
 * State assertion: warn if the eDP PLL enable bit in DP_A does not
 * match the expected state.
 */
2158 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2160 	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2162 	I915_STATE_WARN(cur_state != state,
2163 			"eDP PLL state assertion failure (expected %s, current %s)\n",
2164 			state_string(state), state_string(cur_state));
2166 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2167 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
/*
 * Enable the CPU eDP PLL. Preconditions (asserted): pipe off, DP port
 * off, PLL currently off.
 */
2169 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2171 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2172 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2173 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2175 	assert_pipe_disabled(dev_priv, crtc->pipe);
2176 	assert_dp_port_disabled(intel_dp);
2177 	assert_edp_pll_disabled(dev_priv);
2179 	DRM_DEBUG_KMS("\n");
2180 	intel_dp->DP |= DP_PLL_ENABLE;
2182 	I915_WRITE(DP_A, intel_dp->DP);
/*
 * Disable the CPU eDP PLL. Preconditions (asserted): pipe off, DP port
 * off, PLL currently on.
 */
2187 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2189 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2190 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2191 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2193 	assert_pipe_disabled(dev_priv, crtc->pipe);
2194 	assert_dp_port_disabled(intel_dp);
2195 	assert_edp_pll_enabled(dev_priv);
2197 	intel_dp->DP &= ~DP_PLL_ENABLE;
2199 	I915_WRITE(DP_A, intel_dp->DP);
2204 /* If the sink supports it, try to set the power state appropriately */
2205 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2209 	/* Should have a valid DPCD by this point */
	/* DP_SET_POWER only exists from DPCD 1.1 onwards */
2210 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2213 	if (mode != DRM_MODE_DPMS_ON) {
2214 		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2218 		 * When turning on, we need to retry for 1ms to give the sink
		/* Retry up to 3 times; sinks may need time to wake */
2221 		for (i = 0; i < 3; i++) {
2222 			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2231 		DRM_DEBUG_KMS("failed to %s sink power state\n",
2232 			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
/*
 * Hardware-state readout: report whether the DP port is enabled and,
 * if so, which pipe drives it. The pipe is encoded differently per
 * platform (gen7 port A, CPT transcoder mapping, CHV, legacy).
 */
2235 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2238 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2239 	enum port port = dp_to_dig_port(intel_dp)->port;
2240 	struct drm_device *dev = encoder->base.dev;
2241 	struct drm_i915_private *dev_priv = dev->dev_private;
2242 	enum intel_display_power_domain power_domain;
2245 	power_domain = intel_display_port_power_domain(encoder);
2246 	if (!intel_display_power_is_enabled(dev_priv, power_domain))
2249 	tmp = I915_READ(intel_dp->output_reg);
2251 	if (!(tmp & DP_PORT_EN))
2254 	if (IS_GEN7(dev) && port == PORT_A) {
2255 		*pipe = PORT_TO_PIPE_CPT(tmp);
2256 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* On CPT the port->pipe mapping lives in TRANS_DP_CTL */
2259 		for_each_pipe(dev_priv, p) {
2260 			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2261 			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2267 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2268 			      intel_dp->output_reg);
2269 	} else if (IS_CHERRYVIEW(dev)) {
2270 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2272 		*pipe = PORT_TO_PIPE(tmp);
/*
 * Hardware-state readout into pipe_config: audio enable, sync flags,
 * color range, lane count, M/N values, port clock and dot clock.
 * Also fixes up a bogus VBT eDP bpp limit against the BIOS-programmed
 * pipe bpp.
 */
2278 static void intel_dp_get_config(struct intel_encoder *encoder,
2279 				struct intel_crtc_state *pipe_config)
2281 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2283 	struct drm_device *dev = encoder->base.dev;
2284 	struct drm_i915_private *dev_priv = dev->dev_private;
2285 	enum port port = dp_to_dig_port(intel_dp)->port;
2286 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2289 	tmp = I915_READ(intel_dp->output_reg);
2291 	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
	/* Sync polarity lives in TRANS_DP_CTL on CPT, in the port reg elsewhere */
2293 	if (HAS_PCH_CPT(dev) && port != PORT_A) {
2294 		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2296 		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2297 			flags |= DRM_MODE_FLAG_PHSYNC;
2299 			flags |= DRM_MODE_FLAG_NHSYNC;
2301 		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2302 			flags |= DRM_MODE_FLAG_PVSYNC;
2304 			flags |= DRM_MODE_FLAG_NVSYNC;
2306 		if (tmp & DP_SYNC_HS_HIGH)
2307 			flags |= DRM_MODE_FLAG_PHSYNC;
2309 			flags |= DRM_MODE_FLAG_NHSYNC;
2311 		if (tmp & DP_SYNC_VS_HIGH)
2312 			flags |= DRM_MODE_FLAG_PVSYNC;
2314 			flags |= DRM_MODE_FLAG_NVSYNC;
2317 	pipe_config->base.adjusted_mode.flags |= flags;
2319 	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2320 	    tmp & DP_COLOR_RANGE_16_235)
2321 		pipe_config->limited_color_range = true;
2323 	pipe_config->has_dp_encoder = true;
2325 	pipe_config->lane_count =
2326 		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2328 	intel_dp_get_m_n(crtc, pipe_config);
	/* CPU eDP: recover the link rate from the DP_A PLL frequency bits */
2330 	if (port == PORT_A) {
2331 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2332 			pipe_config->port_clock = 162000;
2334 			pipe_config->port_clock = 270000;
2337 	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2338 					    &pipe_config->dp_m_n);
2340 	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2341 		ironlake_check_encoder_dotclock(pipe_config, dotclock);
2343 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2345 	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2346 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2348 		 * This is a big fat ugly hack.
2350 		 * Some machines in UEFI boot mode provide us a VBT that has 18
2351 		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2352 		 * unknown we fail to light up. Yet the same BIOS boots up with
2353 		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2354 		 * max, not what it tells us to use.
2356 		 * Note: This will still be broken if the eDP panel is not lit
2357 		 * up by the BIOS, and thus we can't get the mode at module
2360 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2361 			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2362 		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
/*
 * Encoder disable: stop audio and PSR, put the sink into DPMS off and
 * power the panel down (holding VDD across the transition). On g4x
 * (gen < 5) the port itself must go down before the pipe.
 */
2366 static void intel_disable_dp(struct intel_encoder *encoder)
2368 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2369 	struct drm_device *dev = encoder->base.dev;
2370 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2372 	if (crtc->config->has_audio)
2373 		intel_audio_codec_disable(encoder);
2375 	if (HAS_PSR(dev) && !HAS_DDI(dev))
2376 		intel_psr_disable(intel_dp);
2378 	/* Make sure the panel is off before trying to change the mode. But also
2379 	 * ensure that we have vdd while we switch off the panel. */
2380 	intel_edp_panel_vdd_on(intel_dp);
2381 	intel_edp_backlight_off(intel_dp);
2382 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2383 	intel_edp_panel_off(intel_dp);
2385 	/* disable the port before the pipe on g4x */
2386 	if (INTEL_INFO(dev)->gen < 5)
2387 		intel_dp_link_down(intel_dp);
/*
 * ILK post-disable: drop the link, then shut down the CPU eDP PLL
 * (the PLL-off path applies to port A; the elided condition likely
 * checks port == PORT_A — TODO confirm).
 */
2390 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2392 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2393 	enum port port = dp_to_dig_port(intel_dp)->port;
2395 	intel_dp_link_down(intel_dp);
2397 		ironlake_edp_pll_off(intel_dp);
/* VLV post-disable: just take the link down. */
2400 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2402 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2404 	intel_dp_link_down(intel_dp);
/*
 * CHV PHY: assert or deassert the soft reset on the data lanes via
 * sideband (DPIO) writes. Lanes 0/1 (PCS01) are always handled; lanes
 * 2/3 (PCS23) only when more than two lanes are in use. The second
 * register pair gates the clock soft reset with CHV_PCS_REQ_SOFTRESET_EN.
 */
2407 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2410 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2411 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2412 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2413 	enum pipe pipe = crtc->pipe;
2416 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2418 		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2420 		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2421 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2423 	if (crtc->config->lane_count > 2) {
2424 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2426 			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2428 			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2429 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2432 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2433 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2435 		val &= ~DPIO_PCS_CLK_SOFT_RESET;
2437 		val |= DPIO_PCS_CLK_SOFT_RESET;
2438 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2440 	if (crtc->config->lane_count > 2) {
2441 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2442 		val |= CHV_PCS_REQ_SOFTRESET_EN;
2444 			val &= ~DPIO_PCS_CLK_SOFT_RESET;
2446 			val |= DPIO_PCS_CLK_SOFT_RESET;
2447 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2451 static void chv_post_disable_dp(struct intel_encoder *encoder)
2453 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2454 struct drm_device *dev = encoder->base.dev;
2455 struct drm_i915_private *dev_priv = dev->dev_private;
2457 intel_dp_link_down(intel_dp);
2459 mutex_lock(&dev_priv->sb_lock);
2461 /* Assert data lane reset */
2462 chv_data_lane_soft_reset(encoder, true);
2464 mutex_unlock(&dev_priv->sb_lock);
/*
 * Program the requested DP training pattern into the hardware.  Three
 * register layouts are handled: DDI platforms via DP_TP_CTL, CPT-style
 * PCH ports (and gen7 port A) via the *_CPT link-train fields, and the
 * g4x/VLV/CHV layout via the plain DP register fields.
 *
 * NOTE(review): this chunk is elided — the 'uint32_t *DP' parameter line,
 * the leading 'if (HAS_DDI(dev))' / trailing 'else' branch headers, and
 * the 'break;' statements between cases are missing from the visible
 * text.  Do not edit the control flow from this view alone.
 */
_intel_dp_set_link_train(struct intel_dp *intel_dp,
2470 uint8_t dp_train_pat)
2472 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2473 struct drm_device *dev = intel_dig_port->base.base.dev;
2474 struct drm_i915_private *dev_priv = dev->dev_private;
2475 enum port port = intel_dig_port->port;
/* DDI path: training pattern lives in DP_TP_CTL, not the port register. */
2478 uint32_t temp = I915_READ(DP_TP_CTL(port));
2480 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2481 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2483 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2485 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2486 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2487 case DP_TRAINING_PATTERN_DISABLE:
2488 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2491 case DP_TRAINING_PATTERN_1:
2492 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2494 case DP_TRAINING_PATTERN_2:
2495 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2497 case DP_TRAINING_PATTERN_3:
2498 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2501 I915_WRITE(DP_TP_CTL(port), temp);
/* CPT path: gen7 port A and PCH-attached ports use the _CPT encoding. */
2503 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2504 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2505 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2507 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2508 case DP_TRAINING_PATTERN_DISABLE:
2509 *DP |= DP_LINK_TRAIN_OFF_CPT;
2511 case DP_TRAINING_PATTERN_1:
2512 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2514 case DP_TRAINING_PATTERN_2:
2515 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2517 case DP_TRAINING_PATTERN_3:
/* No TPS3 encoding on CPT — fall back to pattern 2 and complain. */
2518 DRM_ERROR("DP training pattern 3 not supported\n");
2519 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
/* Legacy path: g4x/VLV/CHV encode the pattern in the port register. */
2524 if (IS_CHERRYVIEW(dev))
2525 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2527 *DP &= ~DP_LINK_TRAIN_MASK;
2529 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2530 case DP_TRAINING_PATTERN_DISABLE:
2531 *DP |= DP_LINK_TRAIN_OFF;
2533 case DP_TRAINING_PATTERN_1:
2534 *DP |= DP_LINK_TRAIN_PAT_1;
2536 case DP_TRAINING_PATTERN_2:
2537 *DP |= DP_LINK_TRAIN_PAT_2;
2539 case DP_TRAINING_PATTERN_3:
/* Only CHV has a TPS3 encoding in this register layout. */
2540 if (IS_CHERRYVIEW(dev)) {
2541 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2543 DRM_ERROR("DP training pattern 3 not supported\n");
2544 *DP |= DP_LINK_TRAIN_PAT_2;
/*
 * Turn the DP port on: first program the port register with training
 * pattern 1 but DP_PORT_EN still clear, then do a second write with
 * DP_PORT_EN (and audio enable, if configured) set.  The two-step write
 * order is required on VLV/CHV — see the comment below — so do not merge
 * the writes.
 */
2551 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2553 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2554 struct drm_i915_private *dev_priv = dev->dev_private;
2555 struct intel_crtc *crtc =
2556 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2558 /* enable with pattern 1 (as per spec) */
2559 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2560 DP_TRAINING_PATTERN_1);
2562 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2563 POSTING_READ(intel_dp->output_reg);
2566 * Magic for VLV/CHV. We _must_ first set up the register
2567 * without actually enabling the port, and then do another
2568 * write to enable the port. Otherwise link training will
2569 * fail when the power sequencer is freshly used for this port.
2571 intel_dp->DP |= DP_PORT_EN;
2572 if (crtc->config->has_audio)
2573 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2575 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2576 POSTING_READ(intel_dp->output_reg);
/*
 * Common DP enable path: init the VLV power sequencer if needed, enable
 * the port, power the panel on under VDD, wait for VLV/CHV PHY lanes to
 * become ready, run link training, and finally enable audio.
 *
 * NOTE(review): this chunk is elided — the 'return;' after the WARN_ON
 * and the pps_lock() call that pairs with pps_unlock() below are missing
 * from the visible text.  Confirm against the full file before editing.
 */
2579 static void intel_enable_dp(struct intel_encoder *encoder)
2581 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2582 struct drm_device *dev = encoder->base.dev;
2583 struct drm_i915_private *dev_priv = dev->dev_private;
2584 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2585 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2586 enum port port = dp_to_dig_port(intel_dp)->port;
2587 enum pipe pipe = crtc->pipe;
/* Port must not already be enabled when we get here. */
2589 if (WARN_ON(dp_reg & DP_PORT_EN))
2594 if (IS_VALLEYVIEW(dev))
2595 vlv_init_panel_power_sequencer(intel_dp);
2597 intel_dp_enable_port(intel_dp);
2599 if (port == PORT_A && IS_GEN5(dev_priv)) {
2601 * Underrun reporting for the other pipe was disabled in
2602 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2603 * enabled, so it's now safe to re-enable underrun reporting.
2605 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2606 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2607 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
/* Panel power-on must happen with VDD held. */
2610 edp_panel_vdd_on(intel_dp);
2611 edp_panel_on(intel_dp);
2612 edp_panel_vdd_off(intel_dp, true);
2614 pps_unlock(intel_dp);
2616 if (IS_VALLEYVIEW(dev)) {
2617 unsigned int lane_mask = 0x0;
/* On CHV only the lanes actually in use need to report ready. */
2619 if (IS_CHERRYVIEW(dev))
2620 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2622 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2626 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2627 intel_dp_start_link_train(intel_dp);
2628 intel_dp_stop_link_train(intel_dp);
2630 if (crtc->config->has_audio) {
2631 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2633 intel_audio_codec_enable(encoder);
2637 static void g4x_enable_dp(struct intel_encoder *encoder)
2639 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2641 intel_enable_dp(encoder);
2642 intel_edp_backlight_on(intel_dp);
2645 static void vlv_enable_dp(struct intel_encoder *encoder)
2647 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2649 intel_edp_backlight_on(intel_dp);
2650 intel_psr_enable(intel_dp);
/*
 * g4x/ilk ->pre_enable hook: prepare the port register, suppress FIFO
 * underrun reporting on the other pipe around the eDP PLL/port enable
 * (re-enabled later in intel_enable_dp()), and turn the CPU eDP PLL on
 * for port A.
 */
2653 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2655 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2656 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2657 enum port port = dp_to_dig_port(intel_dp)->port;
2658 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2660 intel_dp_prepare(encoder);
2662 if (port == PORT_A && IS_GEN5(dev_priv)) {
2664 * We get FIFO underruns on the other pipe when
2665 * enabling the CPU eDP PLL, and when enabling CPU
2666 * eDP port. We could potentially avoid the PLL
2667 * underrun with a vblank wait just prior to enabling
2668 * the PLL, but that doesn't appear to help the port
2669 * enable case. Just sweep it all under the rug.
2671 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2672 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2675 /* Only ilk+ has port A */
2676 if (port == PORT_A) {
2677 ironlake_set_pll_cpu_edp(intel_dp);
2678 ironlake_edp_pll_on(intel_dp);
/*
 * Logically disconnect this port from the power sequencer it currently
 * owns: sync VDD off first, clear the sequencer's port select by zeroing
 * its PP_ON register, and mark the port as having no sequencer.
 */
2682 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2684 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2685 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2686 enum pipe pipe = intel_dp->pps_pipe;
2687 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
/* VDD must be off before we drop ownership of the sequencer. */
2689 edp_panel_vdd_off_sync(intel_dp);
2692 * VLV seems to get confused when multiple power seqeuencers
2693 * have the same port selected (even if only one has power/vdd
2694 * enabled). The failure manifests as vlv_wait_port_ready() failing
2695 * CHV on the other hand doesn't seem to mind having the same port
2696 * selected in multiple power seqeuencers, but let's clear the
2697 * port select always when logically disconnecting a power sequencer
2700 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2701 pipe_name(pipe), port_name(intel_dig_port->port));
2702 I915_WRITE(pp_on_reg, 0);
2703 POSTING_READ(pp_on_reg);
2705 intel_dp->pps_pipe = INVALID_PIPE;
/*
 * Take the given pipe's power sequencer away from whichever eDP port
 * currently owns it, so the caller can claim it.  Must be called with
 * pps_mutex held (asserted below).
 *
 * NOTE(review): this chunk is elided — the 'enum pipe pipe' parameter
 * line, the early 'return'/'continue' statements and closing braces are
 * missing from the visible text.
 */
2708 static void vlv_steal_power_sequencer(struct drm_device *dev,
2711 struct drm_i915_private *dev_priv = dev->dev_private;
2712 struct intel_encoder *encoder;
2714 lockdep_assert_held(&dev_priv->pps_mutex);
/* Only pipes A and B have power sequencers on VLV/CHV. */
2716 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2719 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2721 struct intel_dp *intel_dp;
/* Only eDP ports can own a power sequencer. */
2724 if (encoder->type != INTEL_OUTPUT_EDP)
2727 intel_dp = enc_to_intel_dp(&encoder->base);
2728 port = dp_to_dig_port(intel_dp)->port;
2730 if (intel_dp->pps_pipe != pipe)
2733 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2734 pipe_name(pipe), port_name(port));
/* Stealing from a port that is actively driving a crtc is a bug. */
2736 WARN(encoder->base.crtc,
2737 "stealing pipe %c power sequencer from active eDP port %c\n",
2738 pipe_name(pipe), port_name(port));
2740 /* make sure vdd is off before we steal it */
2741 vlv_detach_power_sequencer(intel_dp);
/*
 * Bind the power sequencer of this port's current pipe to the port:
 * detach any sequencer the port previously held, steal the target pipe's
 * sequencer from any other port, then initialize the sequencer state and
 * registers for this pipe/port pair.  Called with pps_mutex held
 * (asserted below); no-op for non-eDP ports.
 */
2745 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2747 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2748 struct intel_encoder *encoder = &intel_dig_port->base;
2749 struct drm_device *dev = encoder->base.dev;
2750 struct drm_i915_private *dev_priv = dev->dev_private;
2751 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2753 lockdep_assert_held(&dev_priv->pps_mutex);
2755 if (!is_edp(intel_dp))
/* Already bound to the right pipe — nothing to do. */
2758 if (intel_dp->pps_pipe == crtc->pipe)
2762 * If another power sequencer was being used on this
2763 * port previously make sure to turn off vdd there while
2764 * we still have control of it.
2766 if (intel_dp->pps_pipe != INVALID_PIPE)
2767 vlv_detach_power_sequencer(intel_dp)
2770 * We may be stealing the power
2771 * sequencer from another port.
2773 vlv_steal_power_sequencer(dev, crtc->pipe);
2775 /* now it's all ours */
2776 intel_dp->pps_pipe = crtc->pipe;
2778 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2779 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2781 /* init power sequencer on this pipe and port */
2782 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2783 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
/*
 * VLV ->pre_enable hook: program the PCS DPIO registers for this channel
 * under sb_lock, then run the common DP enable sequence.
 *
 * NOTE(review): this chunk is elided — several DPIO register writes
 * between the VLV_PCS01_DW8 read and the VLV_PCS_DW8 write are missing
 * from the visible text, as is the local 'val' declaration.
 */
2786 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2788 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2789 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2790 struct drm_device *dev = encoder->base.dev;
2791 struct drm_i915_private *dev_priv = dev->dev_private;
2792 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2793 enum dpio_channel port = vlv_dport_to_channel(dport);
2794 int pipe = intel_crtc->pipe;
2797 mutex_lock(&dev_priv->sb_lock);
2799 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2806 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2807 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2808 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2810 mutex_unlock(&dev_priv->sb_lock);
2812 intel_enable_dp(encoder);
/*
 * VLV ->pre_pll_enable hook: prepare the port register, then program the
 * channel's PCS/TX DPIO defaults (lane resets, clock config, and the
 * magic inter-pair skew workaround values) under sb_lock before the PLL
 * is enabled.
 */
2815 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2817 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2818 struct drm_device *dev = encoder->base.dev;
2819 struct drm_i915_private *dev_priv = dev->dev_private;
2820 struct intel_crtc *intel_crtc =
2821 to_intel_crtc(encoder->base.crtc);
2822 enum dpio_channel port = vlv_dport_to_channel(dport);
2823 int pipe = intel_crtc->pipe;
2825 intel_dp_prepare(encoder);
2827 /* Program Tx lane resets to default */
2828 mutex_lock(&dev_priv->sb_lock);
2829 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2830 DPIO_PCS_TX_LANE2_RESET |
2831 DPIO_PCS_TX_LANE1_RESET);
2832 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2833 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2834 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2835 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2836 DPIO_PCS_CLK_SOFT_RESET);
2838 /* Fix up inter-pair skew failure */
2839 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2840 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2841 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2842 mutex_unlock(&dev_priv->sb_lock);
/*
 * CHV ->pre_enable hook: under sb_lock, hand TX FIFO reset control to
 * hardware, program per-lane latency ("upar") settings, set up data-lane
 * stagger based on port clock, and deassert the data-lane reset; then run
 * the common DP enable sequence and drop the temporary CL2 powergate
 * override if one was taken in chv_dp_pre_pll_enable().
 *
 * NOTE(review): this chunk is elided — the 'stagger = 0x...' assignments
 * belonging to the port_clock if/else ladder, a 'continue' in the upar
 * loop, and several braces are missing from the visible text.
 */
2845 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2847 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2848 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2849 struct drm_device *dev = encoder->base.dev;
2850 struct drm_i915_private *dev_priv = dev->dev_private;
2851 struct intel_crtc *intel_crtc =
2852 to_intel_crtc(encoder->base.crtc);
2853 enum dpio_channel ch = vlv_dport_to_channel(dport);
2854 int pipe = intel_crtc->pipe;
2855 int data, i, stagger;
2858 mutex_lock(&dev_priv->sb_lock);
2860 /* allow hardware to manage TX FIFO reset source */
2861 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2862 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2863 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
/* Second lane pair only exists for > 2 lanes. */
2865 if (intel_crtc->config->lane_count > 2) {
2866 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2867 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2868 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2871 /* Program Tx lane latency optimal setting*/
2872 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2873 /* Set the upar bit */
2874 if (intel_crtc->config->lane_count == 1)
2877 data = (i == 1) ? 0x0 : 0x1;
2878 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2879 data << DPIO_UPAR_SHIFT);
2882 /* Data lane stagger programming */
2883 if (intel_crtc->config->port_clock > 270000)
2885 else if (intel_crtc->config->port_clock > 135000)
2887 else if (intel_crtc->config->port_clock > 67500)
2889 else if (intel_crtc->config->port_clock > 33750)
2894 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2895 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2896 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2898 if (intel_crtc->config->lane_count > 2) {
2899 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2900 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2901 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2904 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2905 DPIO_LANESTAGGER_STRAP(stagger) |
2906 DPIO_LANESTAGGER_STRAP_OVRD |
2907 DPIO_TX1_STAGGER_MASK(0x1f) |
2908 DPIO_TX1_STAGGER_MULT(6) |
2909 DPIO_TX2_STAGGER_MULT(0));
2911 if (intel_crtc->config->lane_count > 2) {
2912 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2913 DPIO_LANESTAGGER_STRAP(stagger) |
2914 DPIO_LANESTAGGER_STRAP_OVRD |
2915 DPIO_TX1_STAGGER_MASK(0x1f) |
2916 DPIO_TX1_STAGGER_MULT(7) |
2917 DPIO_TX2_STAGGER_MULT(5));
2920 /* Deassert data lane reset */
2921 chv_data_lane_soft_reset(encoder, false);
2923 mutex_unlock(&dev_priv->sb_lock);
2925 intel_enable_dp(encoder);
2927 /* Second common lane will stay alive on its own now */
2928 if (dport->release_cl2_override) {
2929 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2930 dport->release_cl2_override = false;
/*
 * CHV ->pre_pll_enable hook: prepare the port register, powergate unused
 * PHY lanes, and — under sb_lock — assert the data-lane reset, force the
 * left/right clock buffer distribution, and program clock channel usage
 * for both PCS pairs and the common lane.  For CH0 on pipe B the second
 * common lane is force-powered so the PLL registers are accessible; the
 * override is released later in chv_pre_enable_dp().
 *
 * NOTE(review): this chunk is elided — the if/else headers choosing
 * between the paired FORCE/USEDCLKCHANNEL set/clear lines and the 'val'
 * declaration are missing from the visible text.
 */
2934 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2936 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2937 struct drm_device *dev = encoder->base.dev;
2938 struct drm_i915_private *dev_priv = dev->dev_private;
2939 struct intel_crtc *intel_crtc =
2940 to_intel_crtc(encoder->base.crtc);
2941 enum dpio_channel ch = vlv_dport_to_channel(dport);
2942 enum pipe pipe = intel_crtc->pipe;
2943 unsigned int lane_mask =
2944 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2947 intel_dp_prepare(encoder);
2950 * Must trick the second common lane into life.
2951 * Otherwise we can't even access the PLL.
2953 if (ch == DPIO_CH0 && pipe == PIPE_B)
2954 dport->release_cl2_override =
2955 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2957 chv_phy_powergate_lanes(encoder, true, lane_mask);
2959 mutex_lock(&dev_priv->sb_lock);
2961 /* Assert data lane reset */
2962 chv_data_lane_soft_reset(encoder, true);
2964 /* program left/right clock distribution */
2965 if (pipe != PIPE_B) {
2966 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2967 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2969 val |= CHV_BUFLEFTENA1_FORCE;
2971 val |= CHV_BUFRIGHTENA1_FORCE;
2972 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2974 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2975 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2977 val |= CHV_BUFLEFTENA2_FORCE;
2979 val |= CHV_BUFRIGHTENA2_FORCE;
2980 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2983 /* program clock channel usage */
2984 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2985 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2987 val &= ~CHV_PCS_USEDCLKCHANNEL;
2989 val |= CHV_PCS_USEDCLKCHANNEL;
2990 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2992 if (intel_crtc->config->lane_count > 2) {
2993 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2994 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2996 val &= ~CHV_PCS_USEDCLKCHANNEL;
2998 val |= CHV_PCS_USEDCLKCHANNEL;
2999 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3003 * This a a bit weird since generally CL
3004 * matches the pipe, but here we need to
3005 * pick the CL based on the port.
3007 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3009 val &= ~CHV_CMN_USEDCLKCHANNEL;
3011 val |= CHV_CMN_USEDCLKCHANNEL;
3012 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3014 mutex_unlock(&dev_priv->sb_lock);
/*
 * CHV ->post_pll_disable hook: clear the forced left/right clock buffer
 * distribution (register pair depends on which pipe was driving), then
 * drop the lane powergate overrides while deliberately keeping one lane
 * alive — see the comment below.
 */
3017 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3019 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3020 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3023 mutex_lock(&dev_priv->sb_lock);
3025 /* disable left/right clock distribution */
3026 if (pipe != PIPE_B) {
3027 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3028 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3029 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3031 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3032 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3033 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3036 mutex_unlock(&dev_priv->sb_lock);
3039 * Leave the power down bit cleared for at least one
3040 * lane so that chv_powergate_phy_ch() will power
3041 * on something when the channel is otherwise unused.
3042 * When the port is off and the override is removed
3043 * the lanes power down anyway, so otherwise it doesn't
3044 * really matter what the state of power down bits is
3047 chv_phy_powergate_lanes(encoder, false, 0x0);
3051 * Native read with retry for link status and receiver capability reads for
3052 * cases where the sink may still be asleep.
3054 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3055 * supposed to retry 3 times per the spec.
/*
 * NOTE(review): this chunk is elided — the 'static ssize_t' return type
 * line, the success check inside the retry loop and the final return are
 * missing from the visible text.  Callers treat the return value as the
 * number of bytes read (see intel_dp_get_link_status()).
 */
3058 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3059 void *buffer, size_t size)
3065 * Sometime we just get the same incorrect byte repeated
3066 * over the entire buffer. Doing just one throw away read
3067 * initially seems to "solve" it.
3069 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
/* Retry up to 3 times as the DP spec requires for sleeping sinks. */
3071 for (i = 0; i < 3; i++) {
3072 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3082 * Fetch AUX CH registers 0x202 - 0x207 which contain
3083 * link status information
/*
 * Returns true only when all DP_LINK_STATUS_SIZE bytes were read.
 * NOTE(review): the argument line naming the DPCD offset and destination
 * buffer is elided from this chunk.
 */
3086 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3088 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3091 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3094 /* These are source-specific values. */
/*
 * Maximum voltage swing level the source hardware supports, per platform
 * and port.  NOTE(review): the 'static uint8_t' return-type line is
 * elided from this chunk.
 */
3096 intel_dp_voltage_max(struct intel_dp *intel_dp)
3098 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3099 struct drm_i915_private *dev_priv = dev->dev_private;
3100 enum port port = dp_to_dig_port(intel_dp)->port;
3102 if (IS_BROXTON(dev))
3103 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3104 else if (INTEL_INFO(dev)->gen >= 9) {
/* Gen9+ eDP on port A may allow the low-vswing table. */
3105 if (dev_priv->edp_low_vswing && port == PORT_A)
3106 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3107 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3108 } else if (IS_VALLEYVIEW(dev))
3109 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3110 else if (IS_GEN7(dev) && port == PORT_A)
3111 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3112 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3113 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3115 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
/*
 * Maximum pre-emphasis level supported for a given voltage swing, per
 * platform and port.  Higher swing generally means less pre-emphasis
 * headroom.  NOTE(review): the return-type line and several 'default:'
 * labels are elided from this chunk.
 */
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3121 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3122 enum port port = dp_to_dig_port(intel_dp)->port;
3124 if (INTEL_INFO(dev)->gen >= 9) {
3125 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3126 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3127 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3129 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3131 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3132 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3133 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3135 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3137 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3138 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3140 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3142 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3144 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3145 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3147 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3149 } else if (IS_VALLEYVIEW(dev)) {
3150 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3151 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3152 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3153 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3154 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3155 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3156 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3157 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3159 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3161 } else if (IS_GEN7(dev) && port == PORT_A) {
3162 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3163 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3164 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3165 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3166 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3167 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3169 return DP_TRAIN_PRE_EMPH_LEVEL_0;
/* Fallback table for all remaining platforms. */
3172 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3173 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3174 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3175 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3176 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3177 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3178 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3179 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3181 return DP_TRAIN_PRE_EMPH_LEVEL_0;
/*
 * Translate the negotiated DP training levels (train_set[0]) into VLV
 * DPIO register values — de-emphasis, pre-emphasis and unique transcale —
 * and program them through the sideband under sb_lock.  Invalid
 * swing/pre-emphasis combinations fall through to the (elided) default
 * handling.
 *
 * NOTE(review): this chunk is elided — the 'break;' statements, the
 * 'default:' error paths and the final 'return 0;' are missing from the
 * visible text.
 */
3186 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3188 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3189 struct drm_i915_private *dev_priv = dev->dev_private;
3190 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3191 struct intel_crtc *intel_crtc =
3192 to_intel_crtc(dport->base.base.crtc);
3193 unsigned long demph_reg_value, preemph_reg_value,
3194 uniqtranscale_reg_value;
3195 uint8_t train_set = intel_dp->train_set[0];
3196 enum dpio_channel port = vlv_dport_to_channel(dport);
3197 int pipe = intel_crtc->pipe;
3199 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3200 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3201 preemph_reg_value = 0x0004000;
3202 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3203 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3204 demph_reg_value = 0x2B405555;
3205 uniqtranscale_reg_value = 0x552AB83A;
3207 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3208 demph_reg_value = 0x2B404040;
3209 uniqtranscale_reg_value = 0x5548B83A;
3211 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3212 demph_reg_value = 0x2B245555;
3213 uniqtranscale_reg_value = 0x5560B83A;
3215 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3216 demph_reg_value = 0x2B405555;
3217 uniqtranscale_reg_value = 0x5598DA3A;
3223 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3224 preemph_reg_value = 0x0002000;
3225 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3226 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3227 demph_reg_value = 0x2B404040;
3228 uniqtranscale_reg_value = 0x5552B83A;
3230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3231 demph_reg_value = 0x2B404848;
3232 uniqtranscale_reg_value = 0x5580B83A;
3234 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3235 demph_reg_value = 0x2B404040;
3236 uniqtranscale_reg_value = 0x55ADDA3A;
3242 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3243 preemph_reg_value = 0x0000000;
3244 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3245 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3246 demph_reg_value = 0x2B305555;
3247 uniqtranscale_reg_value = 0x5570B83A;
3249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3250 demph_reg_value = 0x2B2B4040;
3251 uniqtranscale_reg_value = 0x55ADDA3A;
3257 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3258 preemph_reg_value = 0x0006000;
3259 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3260 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3261 demph_reg_value = 0x1B405555;
3262 uniqtranscale_reg_value = 0x55ADDA3A;
/* Program the selected values through the DPIO sideband. */
3272 mutex_lock(&dev_priv->sb_lock);
3273 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3274 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3275 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3276 uniqtranscale_reg_value);
3277 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3278 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3279 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3280 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3281 mutex_unlock(&dev_priv->sb_lock);
3286 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3288 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3289 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
/*
 * Translate the negotiated DP training levels (train_set[0]) into CHV
 * PHY swing/de-emphasis settings and program them through DPIO under
 * sb_lock: clear calc init, zero the TX margins, write per-lane deemph
 * and margin values (plus the unique transition scale), then kick off
 * the swing calculation.
 *
 * NOTE(review): this chunk is elided — 'break;' statements, 'default:'
 * error paths, the 'int i;' declaration and the final 'return 0;' are
 * missing from the visible text.
 */
3292 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3294 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3295 struct drm_i915_private *dev_priv = dev->dev_private;
3296 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3297 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3298 u32 deemph_reg_value, margin_reg_value, val;
3299 uint8_t train_set = intel_dp->train_set[0];
3300 enum dpio_channel ch = vlv_dport_to_channel(dport);
3301 enum pipe pipe = intel_crtc->pipe;
3304 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3305 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3306 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3307 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3308 deemph_reg_value = 128;
3309 margin_reg_value = 52;
3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3312 deemph_reg_value = 128;
3313 margin_reg_value = 77;
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3316 deemph_reg_value = 128;
3317 margin_reg_value = 102;
3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3320 deemph_reg_value = 128;
3321 margin_reg_value = 154;
3322 /* FIXME extra to set for 1200 */
3328 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3329 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3331 deemph_reg_value = 85;
3332 margin_reg_value = 78;
3334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3335 deemph_reg_value = 85;
3336 margin_reg_value = 116;
3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3339 deemph_reg_value = 85;
3340 margin_reg_value = 154;
3346 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3347 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3348 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3349 deemph_reg_value = 64;
3350 margin_reg_value = 104;
3352 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3353 deemph_reg_value = 64;
3354 margin_reg_value = 154;
3360 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3361 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3362 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3363 deemph_reg_value = 43;
3364 margin_reg_value = 154;
3374 mutex_lock(&dev_priv->sb_lock);
3376 /* Clear calc init */
3377 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3378 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3379 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3380 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3381 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3383 if (intel_crtc->config->lane_count > 2) {
3384 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3385 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3386 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3387 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3388 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
/* Zero the TX margin fields before programming new values. */
3391 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3392 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3393 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3394 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3396 if (intel_crtc->config->lane_count > 2) {
3397 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3398 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3399 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3400 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3403 /* Program swing deemph */
3404 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3405 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3406 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3407 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3408 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3411 /* Program swing margin */
3412 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3413 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3415 val &= ~DPIO_SWING_MARGIN000_MASK;
3416 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3419 * Supposedly this value shouldn't matter when unique transition
3420 * scale is disabled, but in fact it does matter. Let's just
3421 * always program the same value and hope it's OK.
3423 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3424 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3426 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3430 * The document said it needs to set bit 27 for ch0 and bit 26
3431 * for ch1. Might be a typo in the doc.
3432 * For now, for this unique transition scale selection, set bit
3433 * 27 for ch0 and ch1.
3435 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3436 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3437 if (chv_need_uniq_trans_scale(train_set))
3438 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3440 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3441 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3444 /* Start swing calculation */
3445 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3446 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3447 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3449 if (intel_crtc->config->lane_count > 2) {
3450 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3451 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3452 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3455 mutex_unlock(&dev_priv->sb_lock);
/*
 * Map DP training levels to the gen4 DP port register's voltage and
 * pre-emphasis bit fields.
 *
 * NOTE(review): this chunk is elided — the return-type line, the
 * 'default:' labels and the 'break;' statements between cases are
 * missing from the visible text.
 */
gen4_signal_levels(uint8_t train_set)
3463 uint32_t signal_levels = 0;
3465 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3468 signal_levels |= DP_VOLTAGE_0_4;
3470 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3471 signal_levels |= DP_VOLTAGE_0_6;
3473 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3474 signal_levels |= DP_VOLTAGE_0_8;
3476 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3477 signal_levels |= DP_VOLTAGE_1_2;
3480 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3481 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3483 signal_levels |= DP_PRE_EMPHASIS_0;
3485 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3486 signal_levels |= DP_PRE_EMPHASIS_3_5;
3488 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3489 signal_levels |= DP_PRE_EMPHASIS_6;
3491 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3492 signal_levels |= DP_PRE_EMPHASIS_9_5;
3495 return signal_levels;
3498 /* Gen6's DP voltage swing and pre-emphasis control */
/*
 * Map a swing/pre-emphasis combination to the SNB eDP register encoding.
 * Unsupported combinations log a debug message and fall back to the
 * 400-600mV/0dB setting.  NOTE(review): the return-type line is elided
 * from this chunk.
 */
gen6_edp_signal_levels(uint8_t train_set)
3502 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3503 DP_TRAIN_PRE_EMPHASIS_MASK);
3504 switch (signal_levels) {
3505 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3506 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3507 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3508 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3509 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3510 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3511 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3512 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3513 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3514 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3515 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3516 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3517 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3518 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3520 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3521 "0x%x\n", signal_levels);
3522 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3526 /* Gen7's DP voltage swing and pre-emphasis control */
/*
 * Same idea as the gen6 variant, but with the IVB eDP register
 * encodings; the fallback for unsupported combinations is the
 * 500mV/0dB setting.
 */
3528 gen7_edp_signal_levels(uint8_t train_set)
3530 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3531 DP_TRAIN_PRE_EMPHASIS_MASK);
3532 switch (signal_levels) {
3533 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3534 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3535 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3536 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3537 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3538 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3540 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3541 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3542 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3543 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3545 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3546 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3547 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3548 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3551 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3552 "0x%x\n", signal_levels);
3553 return EDP_LINK_TRAIN_500MV_0DB_IVB;
/*
 * Program the platform-appropriate voltage-swing/pre-emphasis levels
 * into the port register.  Dispatches on platform: DDI (with Broxton
 * special-cased), CHV, VLV, gen7 eDP (port A), gen6 eDP (port A), and
 * the gen4 fallback.  `mask` selects which bits of intel_dp->DP are
 * replaced by the computed signal_levels before the register write.
 */
3558 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3560 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3561 enum port port = intel_dig_port->port;
3562 struct drm_device *dev = intel_dig_port->base.base.dev;
3563 struct drm_i915_private *dev_priv = to_i915(dev);
3564 uint32_t signal_levels, mask = 0;
/* Only the first lane's train_set is used; lanes are programmed alike. */
3565 uint8_t train_set = intel_dp->train_set[0];
3568 signal_levels = ddi_signal_levels(intel_dp);
3570 if (IS_BROXTON(dev))
3573 mask = DDI_BUF_EMP_MASK;
3574 } else if (IS_CHERRYVIEW(dev)) {
/* CHV/VLV program levels via sideband; mask stays 0, DP untouched. */
3575 signal_levels = chv_signal_levels(intel_dp);
3576 } else if (IS_VALLEYVIEW(dev)) {
3577 signal_levels = vlv_signal_levels(intel_dp);
3578 } else if (IS_GEN7(dev) && port == PORT_A) {
3579 signal_levels = gen7_edp_signal_levels(train_set);
3580 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3581 } else if (IS_GEN6(dev) && port == PORT_A) {
3582 signal_levels = gen6_edp_signal_levels(train_set);
3583 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3585 signal_levels = gen4_signal_levels(train_set);
3586 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3590 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3592 DRM_DEBUG_KMS("Using vswing level %d\n",
3593 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3594 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3595 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3596 DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* Merge new level bits into the cached port value, then write it out. */
3598 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3600 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3601 POSTING_READ(intel_dp->output_reg);
/*
 * Update intel_dp->DP with the requested link-training pattern (via
 * the platform hook _intel_dp_set_link_train) and write it to the
 * port register, with a posting read to flush the write.
 */
3605 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3606 uint8_t dp_train_pat)
3608 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3609 struct drm_i915_private *dev_priv =
3610 to_i915(intel_dig_port->base.base.dev);
3612 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3614 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3615 POSTING_READ(intel_dp->output_reg);
/*
 * Switch the DDI transport to idle link-training mode and, where
 * required, wait for the hardware to report the idle pattern done.
 */
3618 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3620 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3621 struct drm_device *dev = intel_dig_port->base.base.dev;
3622 struct drm_i915_private *dev_priv = dev->dev_private;
3623 enum port port = intel_dig_port->port;
/* Select idle transmission in DP_TP_CTL. */
3629 val = I915_READ(DP_TP_CTL(port));
3630 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3631 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3632 I915_WRITE(DP_TP_CTL(port), val);
3635 * On PORT_A we can have only eDP in SST mode. There the only reason
3636 * we need to set idle transmission mode is to work around a HW issue
3637 * where we enable the pipe while not in idle link-training mode.
3638 * In this case there is requirement to wait for a minimum number of
3639 * idle patterns to be sent.
3644 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3646 DRM_ERROR("Timed out waiting for DP idle patterns\n");
/*
 * Bring the (non-DDI) DP link down: drop to the idle training pattern,
 * disable the port and audio, and apply the IBX transcoder-A
 * workaround where needed.  Ends with the panel power-down delay.
 */
3650 intel_dp_link_down(struct intel_dp *intel_dp)
3652 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3653 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3654 enum port port = intel_dig_port->port;
3655 struct drm_device *dev = intel_dig_port->base.base.dev;
3656 struct drm_i915_private *dev_priv = dev->dev_private;
3657 uint32_t DP = intel_dp->DP;
/* DDI platforms tear the link down elsewhere; this path is a bug there. */
3659 if (WARN_ON(HAS_DDI(dev)))
3662 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3665 DRM_DEBUG_KMS("\n");
/* CPT-style vs. gmch-style training-pattern fields differ. */
3667 if ((IS_GEN7(dev) && port == PORT_A) ||
3668 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3669 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3670 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3672 if (IS_CHERRYVIEW(dev))
3673 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3675 DP &= ~DP_LINK_TRAIN_MASK;
3676 DP |= DP_LINK_TRAIN_PAT_IDLE;
3678 I915_WRITE(intel_dp->output_reg, DP);
3679 POSTING_READ(intel_dp->output_reg);
/* Now actually disable the port and its audio output. */
3681 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3682 I915_WRITE(intel_dp->output_reg, DP);
3683 POSTING_READ(intel_dp->output_reg);
3686 * HW workaround for IBX, we need to move the port
3687 * to transcoder A after disabling it to allow the
3688 * matching HDMI port to be enabled on transcoder A.
3690 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3692 * We get CPU/PCH FIFO underruns on the other pipe when
3693 * doing the workaround. Sweep them under the rug.
3695 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3696 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3698 /* always enable with pattern 1 (as per spec) */
3699 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3700 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3701 I915_WRITE(intel_dp->output_reg, DP);
3702 POSTING_READ(intel_dp->output_reg);
3705 I915_WRITE(intel_dp->output_reg, DP);
3706 POSTING_READ(intel_dp->output_reg);
3708 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
/* Re-enable the underrun reporting suppressed above. */
3709 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3710 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3713 msleep(intel_dp->panel_power_down_delay);
/*
 * Read and cache the sink's DPCD receiver capabilities.  Also probes
 * PSR/PSR2 support (eDP), eDP 1.4 intermediate link rates, and the
 * downstream-port info.  Returns false when the AUX read fails, no
 * DPCD is present, or the downstream-port status fetch fails.
 */
3719 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3721 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3722 struct drm_device *dev = dig_port->base.base.dev;
3723 struct drm_i915_private *dev_priv = dev->dev_private;
3726 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3727 sizeof(intel_dp->dpcd)) < 0)
3728 return false; /* aux transfer failed */
3730 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3732 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3733 return false; /* DPCD not present */
3735 /* Check if the panel supports PSR */
3736 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3737 if (is_edp(intel_dp)) {
3738 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3740 sizeof(intel_dp->psr_dpcd));
3741 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3742 dev_priv->psr.sink_support = true;
3743 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
/* PSR2 is only considered on gen9+ sources. */
3746 if (INTEL_INFO(dev)->gen >= 9 &&
3747 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3748 uint8_t frame_sync_cap;
3750 dev_priv->psr.sink_support = true;
3751 intel_dp_dpcd_read_wake(&intel_dp->aux,
3752 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3753 &frame_sync_cap, 1);
3754 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3755 /* PSR2 needs frame sync as well */
3756 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3757 DRM_DEBUG_KMS("PSR2 %s on sink",
3758 dev_priv->psr.psr2_support ? "supported" : "not supported");
3762 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3763 yesno(intel_dp_source_supports_hbr2(intel_dp)),
3764 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3766 /* Intermediate frequency support */
3767 if (is_edp(intel_dp) &&
3768 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3769 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3770 (rev >= 0x03)) { /* eDp v1.4 or higher */
3771 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3774 intel_dp_dpcd_read_wake(&intel_dp->aux,
3775 DP_SUPPORTED_LINK_RATES,
3777 sizeof(sink_rates));
3779 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3780 int val = le16_to_cpu(sink_rates[i]);
3785 /* Value read is in kHz while drm clock is saved in deca-kHz */
3786 intel_dp->sink_rates[i] = (val * 200) / 10;
3788 intel_dp->num_sink_rates = i;
3791 intel_dp_print_rates(intel_dp);
3793 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3794 DP_DWN_STRM_PORT_PRESENT))
3795 return true; /* native DP sink */
3797 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3798 return true; /* no per-port downstream info */
3800 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3801 intel_dp->downstream_ports,
3802 DP_MAX_DOWNSTREAM_PORTS) < 0)
3803 return false; /* downstream port status fetch failed */
/*
 * Best-effort debug read of the sink and branch OUIs; bails out early
 * when the sink does not advertise OUI support.  Failures are silent.
 */
3809 intel_dp_probe_oui(struct intel_dp *intel_dp)
3813 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3816 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3817 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3818 buf[0], buf[1], buf[2]);
3820 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3821 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3822 buf[0], buf[1], buf[2]);
/*
 * Probe sink MST capability (requires DPCD >= 1.2 and driver-side
 * can_mst), update intel_dp->is_mst, push the result into the MST
 * topology manager, and return the resulting MST state.
 */
3826 intel_dp_probe_mst(struct intel_dp *intel_dp)
3830 if (!intel_dp->can_mst)
3833 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3836 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3837 if (buf[0] & DP_MST_CAP) {
3838 DRM_DEBUG_KMS("Sink is MST capable\n")
3839 intel_dp->is_mst = true;
3841 DRM_DEBUG_KMS("Sink is not MST capable\n");
3842 intel_dp->is_mst = false;
3846 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3847 return intel_dp->is_mst;
/*
 * Stop sink CRC generation: clear DP_TEST_SINK_START in the sink's
 * TEST_SINK register, mark the local state stopped, and re-enable IPS
 * (disabled while CRC capture was running).
 */
3850 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3852 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3853 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3857 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3858 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3863 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3864 buf & ~DP_TEST_SINK_START) < 0) {
3865 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3870 intel_dp->sink_crc.started = false;
3872 hsw_enable_ips(intel_crtc);
/*
 * Start sink CRC generation: stop any in-progress capture, verify the
 * sink supports CRC (TEST_SINK_MISC), record the current test count,
 * disable IPS (it would perturb the CRC), then set DP_TEST_SINK_START.
 * IPS is restored if the final write fails.
 */
3876 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3878 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3879 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3883 if (intel_dp->sink_crc.started) {
3884 ret = intel_dp_sink_crc_stop(intel_dp);
3889 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3892 if (!(buf & DP_TEST_CRC_SUPPORTED))
3895 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3897 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3900 hsw_disable_ips(intel_crtc);
3902 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3903 buf | DP_TEST_SINK_START) < 0) {
3904 hsw_enable_ips(intel_crtc);
3908 intel_dp->sink_crc.started = true;
/*
 * Capture one sink-computed CRC into *crc (6 bytes).  Retries over a
 * bounded number of vblanks until the sink's test counter advances and
 * the CRC differs from the previously captured one; reports an error
 * when the counter never moves or the value never changes.
 */
3912 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3914 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3915 struct drm_device *dev = dig_port->base.base.dev;
3916 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3922 ret = intel_dp_sink_crc_start(intel_dp);
/* One vblank per attempt so the sink has a frame to CRC. */
3927 intel_wait_for_vblank(dev, intel_crtc->pipe);
3929 if (drm_dp_dpcd_readb(&intel_dp->aux,
3930 DP_TEST_SINK_MISC, &buf) < 0) {
3934 count = buf & DP_TEST_COUNT_MASK;
3937 * Count might be reset during the loop. In this case
3938 * last known count needs to be reset as well.
3941 intel_dp->sink_crc.last_count = 0;
3943 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3948 old_equal_new = (count == intel_dp->sink_crc.last_count &&
3949 !memcmp(intel_dp->sink_crc.last_crc, crc,
3952 } while (--attempts && (count == 0 || old_equal_new));
/* Remember this capture for staleness detection on the next call. */
3954 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3955 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
3957 if (attempts == 0) {
3958 if (old_equal_new) {
3959 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
3961 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3968 intel_dp_sink_crc_stop(intel_dp);
/* Read the 1-byte sink IRQ vector; true iff exactly one byte was read. */
3973 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3975 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3976 DP_DEVICE_SERVICE_IRQ_VECTOR,
3977 sink_irq_vector, 1) == 1;
/* Read the 14-byte ESI (event status indicator) block used for MST IRQs. */
3981 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3985 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3987 sink_irq_vector, 14);
/* Compliance: link-training autotest. Currently just ACKs the request. */
3994 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
3996 uint8_t test_result = DP_TEST_ACK;
/* Compliance: video-pattern autotest. Not implemented — NAKs the request. */
4000 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4002 uint8_t test_result = DP_TEST_NAK;
/*
 * Compliance: EDID-read autotest (DP CTS 1.2).  On a failed/corrupt
 * EDID read, request failsafe-resolution mode; otherwise write the
 * checksum of the last EDID block back to the sink and ACK with the
 * EDID_CHECKSUM_WRITE flag.  Sets compliance_test_active before
 * returning so userspace won't interfere mid-test.
 */
4006 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4008 uint8_t test_result = DP_TEST_NAK;
4009 struct intel_connector *intel_connector = intel_dp->attached_connector;
4010 struct drm_connector *connector = &intel_connector->base;
4012 if (intel_connector->detect_edid == NULL ||
4013 connector->edid_corrupt ||
4014 intel_dp->aux.i2c_defer_count > 6) {
4015 /* Check EDID read for NACKs, DEFERs and corruption
4016 * (DP CTS 1.2 Core r1.1)
4017 * 4.2.2.4 : Failed EDID read, I2C_NAK
4018 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4019 * 4.2.2.6 : EDID corruption detected
4020 * Use failsafe mode for all cases
4022 if (intel_dp->aux.i2c_nack_count > 0 ||
4023 intel_dp->aux.i2c_defer_count > 0)
4024 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4025 intel_dp->aux.i2c_nack_count,
4026 intel_dp->aux.i2c_defer_count);
4027 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4029 struct edid *block = intel_connector->detect_edid;
4031 /* We have to write the checksum
4032 * of the last block read
4034 block += intel_connector->detect_edid->extensions;
4036 if (!drm_dp_dpcd_write(&intel_dp->aux,
4037 DP_TEST_EDID_CHECKSUM,
4040 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4042 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4043 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4046 /* Set test active flag here so userspace doesn't interrupt things */
4047 intel_dp->compliance_test_active = 1;
/* Compliance: PHY-pattern autotest. Not implemented — NAKs the request. */
4052 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4054 uint8_t test_result = DP_TEST_NAK;
/*
 * Handle a DP compliance automated-test request: reset the compliance
 * and AUX error-counter state, read DP_TEST_REQUEST, dispatch to the
 * matching autotest handler, and write the ACK/NAK response back to
 * the sink.
 */
4058 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4060 uint8_t response = DP_TEST_NAK;
4064 intel_dp->compliance_test_active = 0;
4065 intel_dp->compliance_test_type = 0;
4066 intel_dp->compliance_test_data = 0;
/* Clear AUX NACK/DEFER counters so the EDID test sees fresh values. */
4068 intel_dp->aux.i2c_nack_count = 0;
4069 intel_dp->aux.i2c_defer_count = 0;
4071 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4073 DRM_DEBUG_KMS("Could not read test request from sink\n");
4078 case DP_TEST_LINK_TRAINING:
4079 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4080 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4081 response = intel_dp_autotest_link_training(intel_dp);
4083 case DP_TEST_LINK_VIDEO_PATTERN:
4084 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4085 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4086 response = intel_dp_autotest_video_pattern(intel_dp);
4088 case DP_TEST_LINK_EDID_READ:
4089 DRM_DEBUG_KMS("EDID test requested\n");
4090 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4091 response = intel_dp_autotest_edid(intel_dp);
4093 case DP_TEST_LINK_PHY_TEST_PATTERN:
4094 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4095 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4096 response = intel_dp_autotest_phy_pattern(intel_dp);
4099 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4104 status = drm_dp_dpcd_write(&intel_dp->aux,
4108 DRM_DEBUG_KMS("Could not write test response to sink\n");
/*
 * MST short-pulse handling: read the ESI block, retrain if channel EQ
 * dropped while MST links are active, forward the event to the MST
 * topology manager, and ack the ESI bits back to the sink (with
 * retries).  If ESI reads fail, the device is assumed gone: MST is
 * torn down and a hotplug event is sent.
 */
4112 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4116 if (intel_dp->is_mst) {
4121 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4125 /* check link status - esi[10] = 0x200c */
4126 if (intel_dp->active_mst_links &&
4127 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4128 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4129 intel_dp_start_link_train(intel_dp);
4130 intel_dp_stop_link_train(intel_dp);
4133 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4134 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
/* Ack the handled ESI bits; retry a few times on partial writes. */
4137 for (retry = 0; retry < 3; retry++) {
4139 wret = drm_dp_dpcd_write(&intel_dp->aux,
4140 DP_SINK_COUNT_ESI+1,
4147 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4149 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4157 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4158 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4159 intel_dp->is_mst = false;
4160 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4161 /* send a hotplug event */
4162 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4169 * According to DP spec
4172 * 2. Configure link according to Receiver Capabilities
4173 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4174 * 4. Check link status on receipt of hot-plug interrupt
/*
 * Short-pulse (SST) handler: with the connection mutex held and an
 * active crtc, read link status and DPCD, clear/log any sink IRQs,
 * and retrain the link if channel EQ is no longer OK.
 */
4177 intel_dp_check_link_status(struct intel_dp *intel_dp)
4179 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4180 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4182 u8 link_status[DP_LINK_STATUS_SIZE];
4184 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
/* Nothing to do unless the encoder is attached to an active crtc. */
4186 if (!intel_encoder->base.crtc)
4189 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4192 /* Try to read receiver status if the link appears to be up */
4193 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4197 /* Now read the DPCD to see if it's actually running */
4198 if (!intel_dp_get_dpcd(intel_dp)) {
4202 /* Try to read the source of the interrupt */
4203 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4204 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4205 /* Clear interrupt source */
4206 drm_dp_dpcd_writeb(&intel_dp->aux,
4207 DP_DEVICE_SERVICE_IRQ_VECTOR,
4210 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4211 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n")
4212 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4213 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4216 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4217 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4218 intel_encoder->base.name);
4219 intel_dp_start_link_train(intel_dp);
4220 intel_dp_stop_link_train(intel_dp);
4224 /* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the DPCD.  Native sinks are
 * connected once the DPCD reads; branch devices are checked via
 * SINK_COUNT (if HPD-capable), then a DDC probe, then downgraded to
 * "unknown" for unreliable port types (VGA / non-EDID).
 */
4225 static enum drm_connector_status
4226 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4228 uint8_t *dpcd = intel_dp->dpcd;
4231 if (!intel_dp_get_dpcd(intel_dp))
4232 return connector_status_disconnected;
4234 /* if there's no downstream port, we're done */
4235 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4236 return connector_status_connected;
4238 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4239 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4240 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4243 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4245 return connector_status_unknown;
4247 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4248 : connector_status_disconnected;
4251 /* If no HPD, poke DDC gently */
4252 if (drm_probe_ddc(&intel_dp->aux.ddc))
4253 return connector_status_connected;
4255 /* Well we tried, say unknown for unreliable port types */
4256 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4257 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4258 if (type == DP_DS_PORT_TYPE_VGA ||
4259 type == DP_DS_PORT_TYPE_NON_EDID)
4260 return connector_status_unknown;
/* Pre-1.1 DPCD: only the coarse downstream type field is available. */
4262 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4263 DP_DWN_STRM_PORT_TYPE_MASK;
4264 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4265 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4266 return connector_status_unknown;
4269 /* Anything else is out of spec, warn and ignore */
4270 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4271 return connector_status_disconnected;
/*
 * eDP "detection": the panel is fixed, so defer to the opregion/quirk
 * based intel_panel_detect() and treat "unknown" as connected.
 */
4274 static enum drm_connector_status
4275 edp_detect(struct intel_dp *intel_dp)
4277 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4278 enum drm_connector_status status;
4280 status = intel_panel_detect(dev);
4281 if (status == connector_status_unknown)
4282 status = connector_status_connected;
/* IBX PCH: live hot-plug state for ports B/C/D from SDEISR. */
4287 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4288 struct intel_digital_port *port)
4292 switch (port->port) {
4296 bit = SDE_PORTB_HOTPLUG;
4299 bit = SDE_PORTC_HOTPLUG;
4302 bit = SDE_PORTD_HOTPLUG;
4305 MISSING_CASE(port->port);
4309 return I915_READ(SDEISR) & bit;
/* CPT/SPT PCH: live hot-plug state for ports B/C/D/E from SDEISR. */
4312 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4313 struct intel_digital_port *port)
4317 switch (port->port) {
4321 bit = SDE_PORTB_HOTPLUG_CPT;
4324 bit = SDE_PORTC_HOTPLUG_CPT;
4327 bit = SDE_PORTD_HOTPLUG_CPT;
4330 bit = SDE_PORTE_HOTPLUG_SPT;
4333 MISSING_CASE(port->port);
4337 return I915_READ(SDEISR) & bit;
/* G4x: live hot-plug state for ports B/C/D from PORT_HOTPLUG_STAT. */
4340 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4341 struct intel_digital_port *port)
4345 switch (port->port) {
4347 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4350 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4353 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4356 MISSING_CASE(port->port);
4360 return I915_READ(PORT_HOTPLUG_STAT) & bit;
/* VLV: same as g4x but with the VLV live-status bit definitions. */
4363 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4364 struct intel_digital_port *port)
4368 switch (port->port) {
4370 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4373 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4376 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4379 MISSING_CASE(port->port);
4383 return I915_READ(PORT_HOTPLUG_STAT) & bit;
/*
 * BXT: map the encoder's hpd_pin back to a port, then test the
 * matching DDI hot-plug bit in GEN8_DE_PORT_ISR.
 */
4386 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4387 struct intel_digital_port *intel_dig_port)
4389 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4393 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4396 bit = BXT_DE_PORT_HP_DDIA;
4399 bit = BXT_DE_PORT_HP_DDIB;
4402 bit = BXT_DE_PORT_HP_DDIC;
4409 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4413 * intel_digital_port_connected - is the specified port connected?
4414 * @dev_priv: i915 private structure
4415 * @port: the port to test
4417 * Return %true if @port is connected, %false otherwise.
/* Dispatch to the platform-specific live-status helper above. */
4419 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4420 struct intel_digital_port *port)
4422 if (HAS_PCH_IBX(dev_priv))
4423 return ibx_digital_port_connected(dev_priv, port);
4424 if (HAS_PCH_SPLIT(dev_priv))
4425 return cpt_digital_port_connected(dev_priv, port);
4426 else if (IS_BROXTON(dev_priv))
4427 return bxt_digital_port_connected(dev_priv, port);
4428 else if (IS_VALLEYVIEW(dev_priv))
4429 return vlv_digital_port_connected(dev_priv, port);
4431 return g4x_digital_port_connected(dev_priv, port);
/*
 * PCH-split detect: check the live hot-plug status first, then
 * confirm presence via the DPCD.
 */
4434 static enum drm_connector_status
4435 ironlake_dp_detect(struct intel_dp *intel_dp)
4437 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4438 struct drm_i915_private *dev_priv = dev->dev_private;
4439 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4441 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4442 return connector_status_disconnected;
4444 return intel_dp_detect_dpcd(intel_dp);
/*
 * g4x-class detect: eDP follows the panel/lid path; external DP checks
 * live hot-plug status before falling through to a DPCD probe.
 */
4447 static enum drm_connector_status
4448 g4x_dp_detect(struct intel_dp *intel_dp)
4450 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4451 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4453 /* Can't disconnect eDP, but you can close the lid... */
4454 if (is_edp(intel_dp)) {
4455 enum drm_connector_status status;
4457 status = intel_panel_detect(dev);
4458 if (status == connector_status_unknown)
4459 status = connector_status_connected;
4463 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4464 return connector_status_disconnected;
4466 return intel_dp_detect_dpcd(intel_dp);
/*
 * Return the connector's EDID: a duplicate of the cached (e.g. VBT)
 * EDID when one exists (IS_ERR cache means "known broken"), otherwise
 * a fresh read over the AUX DDC channel.  Caller owns the result.
 */
4469 static struct edid *
4470 intel_dp_get_edid(struct intel_dp *intel_dp)
4472 struct intel_connector *intel_connector = intel_dp->attached_connector;
4474 /* use cached edid if we have one */
4475 if (intel_connector->edid) {
4477 if (IS_ERR(intel_connector->edid))
4480 return drm_edid_duplicate(intel_connector->edid);
4482 return drm_get_edid(&intel_connector->base,
4483 &intel_dp->aux.ddc);
/*
 * Fetch and store the EDID in detect_edid, then derive has_audio:
 * forced on/off by the force_audio property, otherwise from the
 * EDID's audio capability.
 */
4487 intel_dp_set_edid(struct intel_dp *intel_dp)
4489 struct intel_connector *intel_connector = intel_dp->attached_connector;
4492 edid = intel_dp_get_edid(intel_dp);
4493 intel_connector->detect_edid = edid;
4495 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4496 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4498 intel_dp->has_audio = drm_detect_monitor_audio(edid);
/* Free the cached detect-time EDID and clear the derived audio state. */
4502 intel_dp_unset_edid(struct intel_dp *intel_dp)
4504 struct intel_connector *intel_connector = intel_dp->attached_connector;
4506 kfree(intel_connector->detect_edid);
4507 intel_connector->detect_edid = NULL;
4509 intel_dp->has_audio = false;
/*
 * Grab the display power domain for this DP encoder and return it so
 * the caller can release the same domain via intel_dp_power_put().
 */
4512 static enum intel_display_power_domain
4513 intel_dp_power_get(struct intel_dp *dp)
4515 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4516 enum intel_display_power_domain power_domain;
4518 power_domain = intel_display_port_power_domain(encoder);
4519 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4521 return power_domain;
/* Release a power domain previously taken by intel_dp_power_get(). */
4525 intel_dp_power_put(struct intel_dp *dp,
4526 enum intel_display_power_domain power_domain)
4528 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4529 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
/*
 * drm_connector .detect() hook.  MST-active connectors report
 * disconnected (the monitor lives on the MST connectors instead).
 * Otherwise: take the power domain, run the platform detect path,
 * probe OUI and MST, cache the EDID, and service any pending sink
 * IRQs (including compliance test requests) before releasing power.
 */
4532 static enum drm_connector_status
4533 intel_dp_detect(struct drm_connector *connector, bool force)
4535 struct intel_dp *intel_dp = intel_attached_dp(connector);
4536 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4537 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4538 struct drm_device *dev = connector->dev;
4539 enum drm_connector_status status;
4540 enum intel_display_power_domain power_domain;
4544 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4545 connector->base.id, connector->name);
4546 intel_dp_unset_edid(intel_dp);
4548 if (intel_dp->is_mst) {
4549 /* MST devices are disconnected from a monitor POV */
4550 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4551 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4552 return connector_status_disconnected;
4555 power_domain = intel_dp_power_get(intel_dp);
4557 /* Can't disconnect eDP, but you can close the lid... */
4558 if (is_edp(intel_dp))
4559 status = edp_detect(intel_dp);
4560 else if (HAS_PCH_SPLIT(dev))
4561 status = ironlake_dp_detect(intel_dp);
4563 status = g4x_dp_detect(intel_dp);
4564 if (status != connector_status_connected)
4567 intel_dp_probe_oui(intel_dp);
4569 ret = intel_dp_probe_mst(intel_dp);
4571 /* if we are in MST mode then this connector
4572 won't appear connected or have anything with EDID on it */
4573 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4574 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4575 status = connector_status_disconnected;
4579 intel_dp_set_edid(intel_dp);
4581 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4582 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4583 status = connector_status_connected;
4585 /* Try to read the source of the interrupt */
4586 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4587 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4588 /* Clear interrupt source */
4589 drm_dp_dpcd_writeb(&intel_dp->aux,
4590 DP_DEVICE_SERVICE_IRQ_VECTOR,
4593 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4594 intel_dp_handle_test_request(intel_dp);
4595 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4596 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4600 intel_dp_power_put(intel_dp, power_domain);
/*
 * drm_connector .force() hook: refresh the cached EDID for a connector
 * whose status was forced by the user, taking the power domain around
 * the AUX/DDC access.  Skips EDID work unless already "connected".
 */
4605 intel_dp_force(struct drm_connector *connector)
4607 struct intel_dp *intel_dp = intel_attached_dp(connector);
4608 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4609 enum intel_display_power_domain power_domain;
4611 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4612 connector->base.id, connector->name);
4613 intel_dp_unset_edid(intel_dp);
4615 if (connector->status != connector_status_connected)
4618 power_domain = intel_dp_power_get(intel_dp);
4620 intel_dp_set_edid(intel_dp);
4622 intel_dp_power_put(intel_dp, power_domain);
4624 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4625 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/*
 * drm_connector .get_modes() hook: populate modes from the EDID cached
 * at detect time; for eDP with no EDID, fall back to the VBT/panel
 * fixed mode.
 */
4628 static int intel_dp_get_modes(struct drm_connector *connector)
4630 struct intel_connector *intel_connector = to_intel_connector(connector);
4633 edid = intel_connector->detect_edid;
4635 int ret = intel_connector_update_modes(connector, edid);
4640 /* if eDP has no EDID, fall back to fixed mode */
4641 if (is_edp(intel_attached_dp(connector)) &&
4642 intel_connector->panel.fixed_mode) {
4643 struct drm_display_mode *mode;
4645 mode = drm_mode_duplicate(connector->dev,
4646 intel_connector->panel.fixed_mode);
4648 drm_mode_probed_add(connector, mode);
/* Report whether the detect-time EDID advertises audio support. */
4657 intel_dp_detect_audio(struct drm_connector *connector)
4659 bool has_audio = false;
4662 edid = to_intel_connector(connector)->detect_edid;
4664 has_audio = drm_detect_monitor_audio(edid);
/*
 * drm_connector .set_property() hook for the force-audio,
 * broadcast-RGB, and (eDP-only) scaling-mode properties.  When the
 * effective state actually changes and the encoder has a crtc, the
 * mode is restored so the change takes effect immediately.
 */
4670 intel_dp_set_property(struct drm_connector *connector,
4671 struct drm_property *property,
4674 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4675 struct intel_connector *intel_connector = to_intel_connector(connector);
4676 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4677 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4680 ret = drm_object_property_set_value(&connector->base, property, val);
4684 if (property == dev_priv->force_audio_property) {
4688 if (i == intel_dp->force_audio)
4691 intel_dp->force_audio = i;
4693 if (i == HDMI_AUDIO_AUTO)
4694 has_audio = intel_dp_detect_audio(connector);
4696 has_audio = (i == HDMI_AUDIO_ON);
4698 if (has_audio == intel_dp->has_audio)
4701 intel_dp->has_audio = has_audio;
4705 if (property == dev_priv->broadcast_rgb_property) {
/* Remember old state to skip the modeset when nothing changed. */
4706 bool old_auto = intel_dp->color_range_auto;
4707 bool old_range = intel_dp->limited_color_range;
4710 case INTEL_BROADCAST_RGB_AUTO:
4711 intel_dp->color_range_auto = true;
4713 case INTEL_BROADCAST_RGB_FULL:
4714 intel_dp->color_range_auto = false;
4715 intel_dp->limited_color_range = false;
4717 case INTEL_BROADCAST_RGB_LIMITED:
4718 intel_dp->color_range_auto = false;
4719 intel_dp->limited_color_range = true;
4725 if (old_auto == intel_dp->color_range_auto &&
4726 old_range == intel_dp->limited_color_range)
4732 if (is_edp(intel_dp) &&
4733 property == connector->dev->mode_config.scaling_mode_property) {
4734 if (val == DRM_MODE_SCALE_NONE) {
4735 DRM_DEBUG_KMS("no scaling not supported\n");
4739 if (intel_connector->panel.fitting_mode == val) {
4740 /* the eDP scaling property is not changed */
4743 intel_connector->panel.fitting_mode = val;
4751 if (intel_encoder->base.crtc)
4752 intel_crtc_restore_mode(intel_encoder->base.crtc);
/*
 * Connector ->destroy hook: free cached EDIDs, tear down the panel state
 * for eDP connectors, and run the core DRM connector cleanup.
 */
4758 intel_dp_connector_destroy(struct drm_connector *connector)
4760 struct intel_connector *intel_connector = to_intel_connector(connector);
/* detect_edid may be NULL; kfree(NULL) is a no-op. */
4762 kfree(intel_connector->detect_edid);
/* edid can also hold an ERR_PTR sentinel — only free real pointers. */
4764 if (!IS_ERR_OR_NULL(intel_connector->edid))
4765 kfree(intel_connector->edid);
4767 /* Can't call is_edp() since the encoder may have been destroyed
4769 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4770 intel_panel_fini(&intel_connector->panel);
4772 drm_connector_cleanup(connector);
/*
 * Encoder ->destroy hook: unregister the AUX channel, clean up MST, make
 * sure any delayed eDP VDD-off has actually completed (under the PPS
 * lock), drop the reboot notifier, and free the digital port.
 */
4776 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4778 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4779 struct intel_dp *intel_dp = &intel_dig_port->dp;
4781 drm_dp_aux_unregister(&intel_dp->aux);
4782 intel_dp_mst_encoder_cleanup(intel_dig_port);
4783 if (is_edp(intel_dp)) {
4784 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4786 * vdd might still be enabled due to the delayed vdd off.
4787 * Make sure vdd is actually turned off here.
4790 edp_panel_vdd_off_sync(intel_dp);
4791 pps_unlock(intel_dp);
4793 if (intel_dp->edp_notifier.notifier_call) {
4794 unregister_reboot_notifier(&intel_dp->edp_notifier);
4795 intel_dp->edp_notifier.notifier_call = NULL;
4798 drm_encoder_cleanup(encoder);
4799 kfree(intel_dig_port);
/*
 * Suspend hook: for eDP, flush the delayed VDD-off work and force VDD off
 * synchronously under the PPS lock before the device suspends.
 */
4802 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4804 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4806 if (!is_edp(intel_dp))
4810 * vdd might still be enabled due to the delayed vdd off.
4811 * Make sure vdd is actually turned off here.
4813 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4815 edp_panel_vdd_off_sync(intel_dp);
4816 pps_unlock(intel_dp);
/*
 * If the BIOS left panel VDD enabled at boot/resume, take the matching
 * power-domain reference so our state tracking is consistent, then
 * schedule the normal delayed VDD off.  Caller must hold pps_mutex.
 */
4819 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4821 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4822 struct drm_device *dev = intel_dig_port->base.base.dev;
4823 struct drm_i915_private *dev_priv = dev->dev_private;
4824 enum intel_display_power_domain power_domain;
4826 lockdep_assert_held(&dev_priv->pps_mutex);
4828 if (!edp_have_panel_vdd(intel_dp))
4832 * The VDD bit needs a power domain reference, so if the bit is
4833 * already enabled when we boot or resume, grab this reference and
4834 * schedule a vdd off, so we don't hold on to the reference
4837 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n")
4838 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4839 intel_display_power_get(dev_priv, power_domain);
4841 edp_panel_vdd_schedule_off(intel_dp);
/*
 * Encoder ->reset hook (eDP only): re-read the BIOS power sequencer
 * assignment on Valleyview and sanitize any leftover VDD state.
 */
4844 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4846 struct intel_dp *intel_dp;
4848 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4851 intel_dp = enc_to_intel_dp(encoder);
4856 * Read out the current power sequencer assignment,
4857 * in case the BIOS did something with it.
4859 if (IS_VALLEYVIEW(encoder->dev))
4860 vlv_initial_power_sequencer_setup(intel_dp);
4862 intel_edp_panel_vdd_sanitize(intel_dp);
4864 pps_unlock(intel_dp);
/* DRM connector vtable: atomic-helper based DPMS and connector state. */
4867 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4868 .dpms = drm_atomic_helper_connector_dpms,
4869 .detect = intel_dp_detect,
4870 .force = intel_dp_force,
4871 .fill_modes = drm_helper_probe_single_connector_modes,
4872 .set_property = intel_dp_set_property,
4873 .atomic_get_property = intel_connector_atomic_get_property,
4874 .destroy = intel_dp_connector_destroy,
4875 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4876 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
/* Probe helpers: mode enumeration, mode validation, encoder selection. */
4879 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4880 .get_modes = intel_dp_get_modes,
4881 .mode_valid = intel_dp_mode_valid,
4882 .best_encoder = intel_best_encoder,
/* DRM encoder vtable: reset on resume, teardown on destroy. */
4885 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4886 .reset = intel_dp_encoder_reset,
4887 .destroy = intel_dp_encoder_destroy,
/*
 * Hotplug IRQ handler for a DP digital port.  Handles long vs short
 * pulses, re-reads the DPCD, probes for MST support and either services
 * MST events or checks SST link status.  Returns an irqreturn value.
 * NOTE(review): intermediate lines (labels, braces, returns) are elided
 * in this excerpt; code left byte-identical.
 */
4891 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4893 struct intel_dp *intel_dp = &intel_dig_port->dp;
4894 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4895 struct drm_device *dev = intel_dig_port->base.base.dev;
4896 struct drm_i915_private *dev_priv = dev->dev_private;
4897 enum intel_display_power_domain power_domain;
4898 enum irqreturn ret = IRQ_NONE;
4900 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4901 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
/* Long pulses on eDP are ignored to avoid a VDD on/off feedback loop. */
4903 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4905 * vdd off can generate a long pulse on eDP which
4906 * would require vdd on to handle it, and thus we
4907 * would end up in an endless cycle of
4908 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4910 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4911 port_name(intel_dig_port->port));
4915 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4916 port_name(intel_dig_port->port),
4917 long_hpd ? "long" : "short");
/* Hold the port power domain while talking to the sink. */
4919 power_domain = intel_display_port_power_domain(intel_encoder);
4920 intel_display_power_get(dev_priv, power_domain);
4923 /* indicate that we need to restart link training */
4924 intel_dp->train_set_valid = false;
4926 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4929 if (!intel_dp_get_dpcd(intel_dp)) {
4933 intel_dp_probe_oui(intel_dp);
/* Sink is not MST-capable: treat as SST and verify link status. */
4935 if (!intel_dp_probe_mst(intel_dp)) {
4936 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4937 intel_dp_check_link_status(intel_dp);
4938 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4942 if (intel_dp->is_mst) {
4943 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4947 if (!intel_dp->is_mst) {
4948 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4949 intel_dp_check_link_status(intel_dp);
4950 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4958 /* if we were in MST mode, and device is not there get out of MST mode */
4959 if (intel_dp->is_mst) {
4960 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4961 intel_dp->is_mst = false;
4962 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4965 intel_display_power_put(dev_priv, power_domain);
4970 /* Return which DP Port should be selected for Transcoder DP control */
/*
 * Scan the encoders attached to @crtc and return the DP output register
 * of the first DP/eDP encoder found.
 */
4972 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4974 struct drm_device *dev = crtc->dev;
4975 struct intel_encoder *intel_encoder;
4976 struct intel_dp *intel_dp;
4978 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4979 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4981 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4982 intel_encoder->type == INTEL_OUTPUT_EDP)
4983 return intel_dp->output_reg;
4989 /* check the VBT to see whether the eDP is on another port */
/*
 * Returns true if the VBT child device table marks @port as eDP.
 * Bails out early on gen < 5 (no eDP) and when the VBT has no child
 * device entries.
 */
4990 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4992 struct drm_i915_private *dev_priv = dev->dev_private;
4993 union child_device_config *p_child;
/* Map DP ports to the corresponding VBT DVO port identifiers. */
4995 static const short port_mapping[] = {
4996 [PORT_B] = DVO_PORT_DPB,
4997 [PORT_C] = DVO_PORT_DPC,
4998 [PORT_D] = DVO_PORT_DPD,
4999 [PORT_E] = DVO_PORT_DPE,
5003 * eDP not supported on g4x. so bail out early just
5004 * for a bit extra safety in case the VBT is bonkers.
5006 if (INTEL_INFO(dev)->gen < 5)
5012 if (!dev_priv->vbt.child_dev_num)
5015 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5016 p_child = dev_priv->vbt.child_dev + i;
/* Match both the DVO port and the eDP bits of the device type. */
5018 if (p_child->common.dvo_port == port_mapping[port] &&
5019 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5020 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
/*
 * Attach the standard DP connector properties (force audio, broadcast
 * RGB) and, for eDP, the panel scaling-mode property defaulting to
 * aspect-preserving scaling.
 */
5027 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5029 struct intel_connector *intel_connector = to_intel_connector(connector);
5031 intel_attach_force_audio_property(connector);
5032 intel_attach_broadcast_rgb_property(connector);
5033 intel_dp->color_range_auto = true;
5035 if (is_edp(intel_dp)) {
5036 drm_mode_create_scaling_mode_property(connector->dev);
5037 drm_object_attach_property(
5039 connector->dev->mode_config.scaling_mode_property,
5040 DRM_MODE_SCALE_ASPECT);
5041 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
 * Record "now" for all panel-power timestamps so the first power
 * transitions after driver load honour the required PPS delays.
 */
5045 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5047 intel_dp->last_power_cycle = jiffies;
5048 intel_dp->last_power_on = jiffies;
5049 intel_dp->last_backlight_off = jiffies;
/*
 * Compute the final eDP panel power sequencing delays (stored in
 * intel_dp->pps_delays) by taking the max of the current hardware
 * register values and the VBT, falling back to eDP spec limits when
 * both are unset.  Caller must hold pps_mutex.  NOTE(review):
 * intermediate lines are elided in this excerpt; code left
 * byte-identical.
 */
5053 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5054 struct intel_dp *intel_dp)
5056 struct drm_i915_private *dev_priv = dev->dev_private;
5057 struct edp_power_seq cur, vbt, spec,
5058 *final = &intel_dp->pps_delays;
5059 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5060 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5062 lockdep_assert_held(&dev_priv->pps_mutex);
5064 /* already initialized? */
5065 if (final->t11_t12 != 0)
/* Select the per-platform PPS register set. */
5068 if (IS_BROXTON(dev)) {
5070 * TODO: BXT has 2 sets of PPS registers.
5071 * Correct Register for Broxton need to be identified
5072 * using VBT. hardcoding for now
5074 pp_ctrl_reg = BXT_PP_CONTROL(0);
5075 pp_on_reg = BXT_PP_ON_DELAYS(0);
5076 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5077 } else if (HAS_PCH_SPLIT(dev)) {
5078 pp_ctrl_reg = PCH_PP_CONTROL;
5079 pp_on_reg = PCH_PP_ON_DELAYS;
5080 pp_off_reg = PCH_PP_OFF_DELAYS;
5081 pp_div_reg = PCH_PP_DIVISOR;
5083 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5085 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5086 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5087 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5088 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5091 /* Workaround: Need to write PP_CONTROL with the unlock key as
5092 * the very first thing. */
5093 pp_ctl = ironlake_get_pp_control(intel_dp);
5095 pp_on = I915_READ(pp_on_reg);
5096 pp_off = I915_READ(pp_off_reg);
/* BXT has no separate divisor register; power-cycle delay is in PP_CONTROL. */
5097 if (!IS_BROXTON(dev)) {
5098 I915_WRITE(pp_ctrl_reg, pp_ctl);
5099 pp_div = I915_READ(pp_div_reg);
5102 /* Pull timing values out of registers */
5103 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5104 PANEL_POWER_UP_DELAY_SHIFT;
5106 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5107 PANEL_LIGHT_ON_DELAY_SHIFT;
5109 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5110 PANEL_LIGHT_OFF_DELAY_SHIFT;
5112 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5113 PANEL_POWER_DOWN_DELAY_SHIFT;
5115 if (IS_BROXTON(dev)) {
5116 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5117 BXT_POWER_CYCLE_DELAY_SHIFT;
5119 cur.t11_t12 = (tmp - 1) * 1000;
5123 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5124 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5127 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5128 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5130 vbt = dev_priv->vbt.edp_pps;
5132 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5133 * our hw here, which are all in 100usec. */
5134 spec.t1_t3 = 210 * 10;
5135 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5136 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5137 spec.t10 = 500 * 10;
5138 /* This one is special and actually in units of 100ms, but zero
5139 * based in the hw (so we need to add 100 ms). But the sw vbt
5140 * table multiplies it with 1000 to make it in units of 100usec,
5142 spec.t11_t12 = (510 + 100) * 10;
5144 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5145 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5147 /* Use the max of the register settings and vbt. If both are
5148 * unset, fall back to the spec limits. */
5149 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5151 max(cur.field, vbt.field))
5152 assign_final(t1_t3);
5156 assign_final(t11_t12);
/* Convert from 100us hw units to ms, rounding up. */
5159 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5160 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5161 intel_dp->backlight_on_delay = get_delay(t8);
5162 intel_dp->backlight_off_delay = get_delay(t9);
5163 intel_dp->panel_power_down_delay = get_delay(t10);
5164 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5167 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5168 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5169 intel_dp->panel_power_cycle_delay);
5171 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5172 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
/*
 * Program the previously computed PPS delays into the hardware power
 * sequencer registers, plus the pp clock divisor and (pre-HSW) the
 * panel port select bits.  Caller must hold pps_mutex.  NOTE(review):
 * intermediate lines are elided in this excerpt; code left
 * byte-identical.
 */
5176 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5177 struct intel_dp *intel_dp)
5179 struct drm_i915_private *dev_priv = dev->dev_private;
5180 u32 pp_on, pp_off, pp_div, port_sel = 0;
5181 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5182 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5183 enum port port = dp_to_dig_port(intel_dp)->port;
5184 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5186 lockdep_assert_held(&dev_priv->pps_mutex);
/* Select the per-platform PPS register set. */
5188 if (IS_BROXTON(dev)) {
5190 * TODO: BXT has 2 sets of PPS registers.
5191 * Correct Register for Broxton need to be identified
5192 * using VBT. hardcoding for now
5194 pp_ctrl_reg = BXT_PP_CONTROL(0);
5195 pp_on_reg = BXT_PP_ON_DELAYS(0);
5196 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5198 } else if (HAS_PCH_SPLIT(dev)) {
5199 pp_on_reg = PCH_PP_ON_DELAYS;
5200 pp_off_reg = PCH_PP_OFF_DELAYS;
5201 pp_div_reg = PCH_PP_DIVISOR;
5203 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5205 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5206 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5207 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5211 * And finally store the new values in the power sequencer. The
5212 * backlight delays are set to 1 because we do manual waits on them. For
5213 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5214 * we'll end up waiting for the backlight off delay twice: once when we
5215 * do the manual sleep, and once when we disable the panel and wait for
5216 * the PP_STATUS bit to become zero.
5218 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5219 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5220 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5221 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5222 /* Compute the divisor for the pp clock, simply match the Bspec
5224 if (IS_BROXTON(dev)) {
/* BXT stores the power-cycle delay in PP_CONTROL, not a divisor reg. */
5225 pp_div = I915_READ(pp_ctrl_reg);
5226 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5227 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5228 << BXT_POWER_CYCLE_DELAY_SHIFT);
5230 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5231 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5232 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5235 /* Haswell doesn't have any port selection bits for the panel
5236 * power sequencer any more. */
5237 if (IS_VALLEYVIEW(dev)) {
5238 port_sel = PANEL_PORT_SELECT_VLV(port);
5239 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5241 port_sel = PANEL_PORT_SELECT_DPA;
5243 port_sel = PANEL_PORT_SELECT_DPD;
5248 I915_WRITE(pp_on_reg, pp_on);
5249 I915_WRITE(pp_off_reg, pp_off);
5250 if (IS_BROXTON(dev))
5251 I915_WRITE(pp_ctrl_reg, pp_div);
5253 I915_WRITE(pp_div_reg, pp_div);
5255 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5256 I915_READ(pp_on_reg),
5257 I915_READ(pp_off_reg),
5259 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5260 I915_READ(pp_div_reg));
5264 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5266 * @refresh_rate: RR to be programmed
5268 * This function gets called when refresh rate (RR) has to be changed from
5269 * one frequency to another. Switches can be between high and low RR
5270 * supported by the panel or to any other RR based on media playback (in
5271 * this case, RR value needs to be passed from user space).
5273 * The caller of this function needs to take a lock on dev_priv->drrs.
5275 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5277 struct drm_i915_private *dev_priv = dev->dev_private;
5278 struct intel_encoder *encoder;
5279 struct intel_digital_port *dig_port = NULL;
5280 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5281 struct intel_crtc_state *config = NULL;
5282 struct intel_crtc *intel_crtc = NULL;
5283 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5285 if (refresh_rate <= 0) {
5286 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5290 if (intel_dp == NULL) {
5291 DRM_DEBUG_KMS("DRRS not supported.\n");
5296 * FIXME: This needs proper synchronization with psr state for some
5297 * platforms that cannot have PSR and DRRS enabled at the same time.
5300 dig_port = dp_to_dig_port(intel_dp);
5301 encoder = &dig_port->base;
5302 intel_crtc = to_intel_crtc(encoder->base.crtc);
5305 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5309 config = intel_crtc->config;
5311 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5312 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
/* Requested rate matches the panel downclock mode -> low RR. */
5316 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5318 index = DRRS_LOW_RR;
5320 if (index == dev_priv->drrs.refresh_rate_type) {
5322 "DRRS requested for previously set RR...ignoring\n");
5326 if (!intel_crtc->active) {
5327 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
/* Gen8+ (except CHV): switch RR by selecting the M1/N1 or M2/N2 set. */
5331 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5334 intel_dp_set_m_n(intel_crtc, M1_N1);
5337 intel_dp_set_m_n(intel_crtc, M2_N2);
5341 DRM_ERROR("Unsupported refreshrate type\n");
/* Gen7: toggle the PIPECONF eDP RR mode switch bit instead. */
5343 } else if (INTEL_INFO(dev)->gen > 6) {
5344 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5347 val = I915_READ(reg);
5348 if (index > DRRS_HIGH_RR) {
5349 if (IS_VALLEYVIEW(dev))
5350 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5352 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5354 if (IS_VALLEYVIEW(dev))
5355 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5357 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5359 I915_WRITE(reg, val);
5362 dev_priv->drrs.refresh_rate_type = index;
5364 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5368 * intel_edp_drrs_enable - init drrs struct if supported
5369 * @intel_dp: DP struct
5371 * Initializes frontbuffer_bits and drrs.dp
5373 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5375 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5376 struct drm_i915_private *dev_priv = dev->dev_private;
5377 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5378 struct drm_crtc *crtc = dig_port->base.base.crtc;
5379 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5381 if (!intel_crtc->config->has_drrs) {
5382 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5386 mutex_lock(&dev_priv->drrs.mutex);
/* Only one eDP panel may own DRRS state at a time. */
5387 if (WARN_ON(dev_priv->drrs.dp)) {
5388 DRM_ERROR("DRRS already enabled\n");
5392 dev_priv->drrs.busy_frontbuffer_bits = 0;
5394 dev_priv->drrs.dp = intel_dp;
5397 mutex_unlock(&dev_priv->drrs.mutex);
5401 * intel_edp_drrs_disable - Disable DRRS
5402 * @intel_dp: DP struct
5405 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5407 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5408 struct drm_i915_private *dev_priv = dev->dev_private;
5409 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5410 struct drm_crtc *crtc = dig_port->base.base.crtc;
5411 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5413 if (!intel_crtc->config->has_drrs)
5416 mutex_lock(&dev_priv->drrs.mutex);
5417 if (!dev_priv->drrs.dp) {
5418 mutex_unlock(&dev_priv->drrs.mutex);
/* If currently downclocked, restore the panel's fixed (high) rate. */
5422 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5423 intel_dp_set_drrs_state(dev_priv->dev,
5424 intel_dp->attached_connector->panel.
5425 fixed_mode->vrefresh);
5427 dev_priv->drrs.dp = NULL;
5428 mutex_unlock(&dev_priv->drrs.mutex);
/* Flush any pending downclock work after dropping the mutex. */
5430 cancel_delayed_work_sync(&dev_priv->drrs.work);
/*
 * Delayed work: after the idleness timeout, switch the panel to its
 * downclocked refresh rate if no frontbuffer is still busy.
 */
5433 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5435 struct drm_i915_private *dev_priv =
5436 container_of(work, typeof(*dev_priv), drrs.work.work);
5437 struct intel_dp *intel_dp;
5439 mutex_lock(&dev_priv->drrs.mutex);
5441 intel_dp = dev_priv->drrs.dp;
5447 * The delayed work can race with an invalidate hence we need to
5451 if (dev_priv->drrs.busy_frontbuffer_bits)
5454 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5455 intel_dp_set_drrs_state(dev_priv->dev,
5456 intel_dp->attached_connector->panel.
5457 downclock_mode->vrefresh);
5460 mutex_unlock(&dev_priv->drrs.mutex);
5464 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5466 * @frontbuffer_bits: frontbuffer plane tracking bits
5468 * This function gets called every time rendering on the given planes starts.
5469 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5471 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5473 void intel_edp_drrs_invalidate(struct drm_device *dev,
5474 unsigned frontbuffer_bits)
5476 struct drm_i915_private *dev_priv = dev->dev_private;
5477 struct drm_crtc *crtc;
5480 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5483 cancel_delayed_work(&dev_priv->drrs.work);
5485 mutex_lock(&dev_priv->drrs.mutex);
5486 if (!dev_priv->drrs.dp) {
5487 mutex_unlock(&dev_priv->drrs.mutex);
/* Only the bits belonging to the DRRS pipe are relevant. */
5491 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5492 pipe = to_intel_crtc(crtc)->pipe;
5494 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5495 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5497 /* invalidate means busy screen hence upclock */
5498 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5499 intel_dp_set_drrs_state(dev_priv->dev,
5500 dev_priv->drrs.dp->attached_connector->panel.
5501 fixed_mode->vrefresh);
5503 mutex_unlock(&dev_priv->drrs.mutex);
5507 * intel_edp_drrs_flush - Restart Idleness DRRS
5509 * @frontbuffer_bits: frontbuffer plane tracking bits
5511 * This function gets called every time rendering on the given planes has
5512 * completed or flip on a crtc is completed. So DRRS should be upclocked
5513 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5514 * if no other planes are dirty.
5516 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5518 void intel_edp_drrs_flush(struct drm_device *dev,
5519 unsigned frontbuffer_bits)
5521 struct drm_i915_private *dev_priv = dev->dev_private;
5522 struct drm_crtc *crtc;
5525 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5528 cancel_delayed_work(&dev_priv->drrs.work);
5530 mutex_lock(&dev_priv->drrs.mutex);
5531 if (!dev_priv->drrs.dp) {
5532 mutex_unlock(&dev_priv->drrs.mutex);
/* Only the bits belonging to the DRRS pipe are relevant. */
5536 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5537 pipe = to_intel_crtc(crtc)->pipe;
5539 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5540 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5542 /* flush means busy screen hence upclock */
5543 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5544 intel_dp_set_drrs_state(dev_priv->dev,
5545 dev_priv->drrs.dp->attached_connector->panel.
5546 fixed_mode->vrefresh);
5549 * flush also means no more activity hence schedule downclock, if all
5550 * other fbs are quiescent too
5552 if (!dev_priv->drrs.busy_frontbuffer_bits)
5553 schedule_delayed_work(&dev_priv->drrs.work,
5554 msecs_to_jiffies(1000));
5555 mutex_unlock(&dev_priv->drrs.mutex);
5559 * DOC: Display Refresh Rate Switching (DRRS)
5561 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5562 * which enables switching between low and high refresh rates,
5563 * dynamically, based on the usage scenario. This feature is applicable
5564 * for internal panels.
5566 * Indication that the panel supports DRRS is given by the panel EDID, which
5567 * would list multiple refresh rates for one resolution.
5569 * DRRS is of 2 types - static and seamless.
5570 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5571 * (may appear as a blink on screen) and is used in dock-undock scenario.
5572 * Seamless DRRS involves changing RR without any visual effect to the user
5573 * and can be used during normal system usage. This is done by programming
5574 * certain registers.
5576 * Support for static/seamless DRRS may be indicated in the VBT based on
5577 * inputs from the panel spec.
5579 * DRRS saves power by switching to low RR based on usage scenarios.
5582 * The implementation is based on frontbuffer tracking implementation.
5583 * When there is a disturbance on the screen triggered by user activity or a
5584 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5585 * When there is no movement on screen, after a timeout of 1 second, a switch
5586 * to low RR is made.
5587 * For integration with frontbuffer tracking code,
5588 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5590 * DRRS can be further extended to support other internal panels and also
5591 * the scenario of video playback wherein RR is set based on the rate
5592 * requested by userspace.
5596 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5597 * @intel_connector: eDP connector
5598 * @fixed_mode: preferred mode of panel
5600 * This function is called only once at driver load to initialize basic
5604 * Downclock mode if panel supports it, else return NULL.
5605 * DRRS support is determined by the presence of downclock mode (apart
5606 * from VBT setting).
5608 static struct drm_display_mode *
5609 intel_dp_drrs_init(struct intel_connector *intel_connector,
5610 struct drm_display_mode *fixed_mode)
5612 struct drm_connector *connector = &intel_connector->base;
5613 struct drm_device *dev = connector->dev;
5614 struct drm_i915_private *dev_priv = dev->dev_private;
5615 struct drm_display_mode *downclock_mode = NULL;
5617 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5618 mutex_init(&dev_priv->drrs.mutex);
5620 if (INTEL_INFO(dev)->gen <= 6) {
5621 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5625 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5626 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
/* Search the probed modes for a lower-refresh variant of fixed_mode. */
5630 downclock_mode = intel_find_panel_downclock
5631 (dev, fixed_mode, connector);
5633 if (!downclock_mode) {
5634 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5638 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5640 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5641 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5642 return downclock_mode;
/*
 * One-time eDP connector setup: sanitize VDD, cache the DPCD and EDID,
 * program the power sequencer registers, pick a fixed (and optional
 * downclock) mode, register the reboot notifier on VLV, and set up the
 * panel + backlight.  Returns false for non-eDP or ghost panels.
 * NOTE(review): intermediate lines are elided in this excerpt; code
 * left byte-identical.
 */
5645 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5646 struct intel_connector *intel_connector)
5648 struct drm_connector *connector = &intel_connector->base;
5649 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5650 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5651 struct drm_device *dev = intel_encoder->base.dev;
5652 struct drm_i915_private *dev_priv = dev->dev_private;
5653 struct drm_display_mode *fixed_mode = NULL;
5654 struct drm_display_mode *downclock_mode = NULL;
5656 struct drm_display_mode *scan;
5658 enum pipe pipe = INVALID_PIPE;
5660 if (!is_edp(intel_dp))
5664 intel_edp_panel_vdd_sanitize(intel_dp);
5665 pps_unlock(intel_dp);
5667 /* Cache DPCD and EDID for edp. */
5668 has_dpcd = intel_dp_get_dpcd(intel_dp);
5671 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5672 dev_priv->no_aux_handshake =
5673 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5674 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5676 /* if this fails, presume the device is a ghost */
5677 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5681 /* We now know it's not a ghost, init power sequence regs. */
5683 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5684 pps_unlock(intel_dp);
5686 mutex_lock(&dev->mode_config.mutex);
5687 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5689 if (drm_add_edid_modes(connector, edid)) {
5690 drm_mode_connector_update_edid_property(connector,
5692 drm_edid_to_eld(connector, edid);
/* ERR_PTR sentinels record "EDID present but invalid" vs "no EDID". */
5695 edid = ERR_PTR(-EINVAL);
5698 edid = ERR_PTR(-ENOENT);
5700 intel_connector->edid = edid;
5702 /* prefer fixed mode from EDID if available */
5703 list_for_each_entry(scan, &connector->probed_modes, head) {
5704 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5705 fixed_mode = drm_mode_duplicate(dev, scan);
5706 downclock_mode = intel_dp_drrs_init(
5707 intel_connector, fixed_mode);
5712 /* fallback to VBT if available for eDP */
5713 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5714 fixed_mode = drm_mode_duplicate(dev,
5715 dev_priv->vbt.lfp_lvds_vbt_mode);
5717 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5719 mutex_unlock(&dev->mode_config.mutex);
5721 if (IS_VALLEYVIEW(dev)) {
5722 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5723 register_reboot_notifier(&intel_dp->edp_notifier);
5726 * Figure out the current pipe for the initial backlight setup.
5727 * If the current pipe isn't valid, try the PPS pipe, and if that
5728 * fails just assume pipe A.
5730 if (IS_CHERRYVIEW(dev))
5731 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5733 pipe = PORT_TO_PIPE(intel_dp->DP);
5735 if (pipe != PIPE_A && pipe != PIPE_B)
5736 pipe = intel_dp->pps_pipe;
5738 if (pipe != PIPE_A && pipe != PIPE_B)
5741 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5745 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5746 intel_connector->panel.backlight.power = intel_edp_backlight_power;
5747 intel_panel_setup_backlight(connector, pipe);
/*
 * Main DP connector initialization: hook up the per-platform AUX vfuncs,
 * create and register the DRM connector, pick the hotplug pin, set up
 * eDP power sequencing, MST, properties, and the G4x band-gap
 * workaround.  Tears everything down again if eDP init detects a ghost
 * panel.  NOTE(review): intermediate lines are elided in this excerpt;
 * code left byte-identical.
 */
5753 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5754 struct intel_connector *intel_connector)
5756 struct drm_connector *connector = &intel_connector->base;
5757 struct intel_dp *intel_dp = &intel_dig_port->dp;
5758 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5759 struct drm_device *dev = intel_encoder->base.dev;
5760 struct drm_i915_private *dev_priv = dev->dev_private;
5761 enum port port = intel_dig_port->port;
5764 intel_dp->pps_pipe = INVALID_PIPE;
5766 /* intel_dp vfuncs */
5767 if (INTEL_INFO(dev)->gen >= 9)
5768 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5769 else if (IS_VALLEYVIEW(dev))
5770 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5771 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5772 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5773 else if (HAS_PCH_SPLIT(dev))
5774 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5776 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5778 if (INTEL_INFO(dev)->gen >= 9)
5779 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5781 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5784 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5786 /* Preserve the current hw state. */
5787 intel_dp->DP = I915_READ(intel_dp->output_reg);
5788 intel_dp->attached_connector = intel_connector;
5790 if (intel_dp_is_edp(dev, port))
5791 type = DRM_MODE_CONNECTOR_eDP;
5793 type = DRM_MODE_CONNECTOR_DisplayPort;
5796 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5797 * for DP the encoder type can be set by the caller to
5798 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5800 if (type == DRM_MODE_CONNECTOR_eDP)
5801 intel_encoder->type = INTEL_OUTPUT_EDP;
5803 /* eDP only on port B and/or C on vlv/chv */
5804 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5805 port != PORT_B && port != PORT_C))
5808 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5809 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5812 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5813 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5815 connector->interlace_allowed = true;
5816 connector->doublescan_allowed = 0;
5818 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5819 edp_panel_vdd_work);
5821 intel_connector_attach_encoder(intel_connector, intel_encoder);
5822 drm_connector_register(connector);
5825 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5827 intel_connector->get_hw_state = intel_connector_get_hw_state;
5828 intel_connector->unregister = intel_dp_connector_unregister;
5830 /* Set up the hotplug pin. */
5833 intel_encoder->hpd_pin = HPD_PORT_A;
5836 intel_encoder->hpd_pin = HPD_PORT_B;
/* BXT A-stepping wires port B hotplug to the port A pin. */
5837 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
5838 intel_encoder->hpd_pin = HPD_PORT_A;
5841 intel_encoder->hpd_pin = HPD_PORT_C;
5844 intel_encoder->hpd_pin = HPD_PORT_D;
5847 intel_encoder->hpd_pin = HPD_PORT_E;
5853 if (is_edp(intel_dp)) {
5855 intel_dp_init_panel_power_timestamps(intel_dp);
5856 if (IS_VALLEYVIEW(dev))
5857 vlv_initial_power_sequencer_setup(intel_dp);
5859 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5860 pps_unlock(intel_dp);
5863 intel_dp_aux_init(intel_dp, intel_connector);
5865 /* init MST on ports that can support it */
5866 if (HAS_DP_MST(dev) &&
5867 (port == PORT_B || port == PORT_C || port == PORT_D))
5868 intel_dp_mst_encoder_init(intel_dig_port,
5869 intel_connector->base.base.id);
/* Ghost eDP panel: undo everything set up above. */
5871 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5872 drm_dp_aux_unregister(&intel_dp->aux);
5873 if (is_edp(intel_dp)) {
5874 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5876 * vdd might still be enabled due to the delayed vdd off.
5877 * Make sure vdd is actually turned off here.
5880 edp_panel_vdd_off_sync(intel_dp);
5881 pps_unlock(intel_dp);
5883 drm_connector_unregister(connector);
5884 drm_connector_cleanup(connector);
5888 intel_dp_add_properties(intel_dp, connector);
5890 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5891 * 0xd. Failure to do so will result in spurious interrupts being
5892 * generated on the port when a cable is not attached.
5894 if (IS_G4X(dev) && !IS_GM45(dev)) {
5895 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5896 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5899 i915_debugfs_connector_add(connector);
/*
 * intel_dp_init - create and register a DisplayPort digital port on @port
 *
 * Allocates the intel_digital_port and its connector, installs the encoder
 * modeset hooks (with per-platform pre/post enable paths for CHV, VLV and a
 * g4x/ilk fallback), records @output_reg and @port, routes HPD long pulses
 * through the digital port, and finally builds the connector via
 * intel_dp_init_connector().  The error path unwinds both allocations.
 *
 * NOTE(review): the embedded original line numbers (5905, 5907, ...) have
 * gaps, so the return type, the early return after a failed kzalloc(), the
 * else-branch openings and the err_init_connector: label are not visible in
 * this excerpt — consult the complete file before relying on control flow.
 */
5905 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5907 struct drm_i915_private *dev_priv = dev->dev_private;
5908 struct intel_digital_port *intel_dig_port;
5909 struct intel_encoder *intel_encoder;
5910 struct drm_encoder *encoder;
5911 struct intel_connector *intel_connector;
/* kzalloc so the port state starts out fully zeroed */
5913 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5914 if (!intel_dig_port)
5917 intel_connector = intel_connector_alloc();
5918 if (!intel_connector)
5919 goto err_connector_alloc;
5921 intel_encoder = &intel_dig_port->base;
5922 encoder = &intel_encoder->base;
5924 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5925 DRM_MODE_ENCODER_TMDS);
/* common modeset hooks shared by all platforms */
5927 intel_encoder->compute_config = intel_dp_compute_config;
5928 intel_encoder->disable = intel_disable_dp;
5929 intel_encoder->get_hw_state = intel_dp_get_hw_state;
5930 intel_encoder->get_config = intel_dp_get_config;
5931 intel_encoder->suspend = intel_dp_encoder_suspend;
/*
 * Platform-specific enable/disable sequencing: CHV additionally needs
 * pre-PLL-enable and post-PLL-disable hooks; VLV shares vlv_enable_dp.
 */
5932 if (IS_CHERRYVIEW(dev)) {
5933 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5934 intel_encoder->pre_enable = chv_pre_enable_dp;
5935 intel_encoder->enable = vlv_enable_dp;
5936 intel_encoder->post_disable = chv_post_disable_dp;
5937 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
5938 } else if (IS_VALLEYVIEW(dev)) {
5939 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5940 intel_encoder->pre_enable = vlv_pre_enable_dp;
5941 intel_encoder->enable = vlv_enable_dp;
5942 intel_encoder->post_disable = vlv_post_disable_dp;
/* presumably the g4x/ilk else-branch — its opening line (5943) is missing */
5944 intel_encoder->pre_enable = g4x_pre_enable_dp;
5945 intel_encoder->enable = g4x_enable_dp;
/* gen5+ (ilk and later) additionally needs a post_disable hook */
5946 if (INTEL_INFO(dev)->gen >= 5)
5947 intel_encoder->post_disable = ilk_post_disable_dp;
5950 intel_dig_port->port = port;
5951 intel_dig_port->dp.output_reg = output_reg;
5953 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
/*
 * crtc_mask selection: CHV can use pipe C (bit 2) in one case, pipes A/B
 * otherwise; non-CHV gets all three pipes.  The intervening condition and
 * else lines (5955/5957/5959) are absent from this excerpt — verify which
 * ports map to which mask in the full file.
 */
5954 if (IS_CHERRYVIEW(dev)) {
5956 intel_encoder->crtc_mask = 1 << 2;
5958 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5960 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5962 intel_encoder->cloneable = 0;
/* route HPD long pulses for this port through the digital port handler */
5964 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5965 dev_priv->hotplug.irq_port[port] = intel_dig_port;
5967 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5968 goto err_init_connector;
/* error unwind: release resources in reverse order of acquisition */
5973 drm_encoder_cleanup(encoder);
5974 kfree(intel_connector);
5975 err_connector_alloc:
5976 kfree(intel_dig_port);
/*
 * intel_dp_mst_suspend - quiesce all active MST topology managers
 *
 * Walks every hotplug irq_port slot; for each DisplayPort digital port
 * whose sink supports MST (can_mst) and is currently running in MST mode
 * (is_mst), hands the topology manager to drm_dp_mst_topology_mgr_suspend().
 *
 * NOTE(review): the embedded original line numbers have gaps here — the
 * declaration of 'i', the 'continue' statements after the two guard
 * conditions, and the closing braces are not visible in this excerpt.
 */
5981 void intel_dp_mst_suspend(struct drm_device *dev)
5983 struct drm_i915_private *dev_priv = dev->dev_private;
/* scan every possible digital port slot */
5987 for (i = 0; i < I915_MAX_PORTS; i++) {
5988 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
/* empty slot — nothing registered on this port */
5989 if (!intel_dig_port)
5992 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5993 if (!intel_dig_port->dp.can_mst)
/* only suspend managers actively running in MST mode */
5995 if (intel_dig_port->dp.is_mst)
5996 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6001 void intel_dp_mst_resume(struct drm_device *dev)
6003 struct drm_i915_private *dev_priv = dev->dev_private;
6006 for (i = 0; i < I915_MAX_PORTS; i++) {
6007 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6008 if (!intel_dig_port)
6010 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6013 if (!intel_dig_port->dp.can_mst)
6016 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6018 intel_dp_check_mst_status(&intel_dig_port->dp);