/* drivers/gpu/drm/i915/intel_dp.c */
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
43
44 /* Compliance test status bits  */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
46 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/* Pairs a DP link rate (in kHz) with the DPLL divider values that generate it. */
struct dp_link_dpll {
	int clock;		/* link rate in kHz */
	struct dpll dpll;	/* divider settings producing that rate */
};

/* DPLL settings for gen4 (G4x) at the two DP 1.1 link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL settings for PCH-split platforms (ILK and later). */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL settings for Valleyview. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we only provide the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Source-supported link rates per platform, in kHz. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
99
100 /**
101  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102  * @intel_dp: DP struct
103  *
104  * If a CPU or PCH DP output is attached to an eDP panel, this function
105  * will return true, and false otherwise.
106  */
107 static bool is_edp(struct intel_dp *intel_dp)
108 {
109         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112 }
113
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
115 {
116         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118         return intel_dig_port->base.base.dev;
119 }
120
121 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122 {
123         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
124 }
125
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
131                                       enum pipe pipe);
132 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
133
134 static int
135 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
136 {
137         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
138
139         switch (max_link_bw) {
140         case DP_LINK_BW_1_62:
141         case DP_LINK_BW_2_7:
142         case DP_LINK_BW_5_4:
143                 break;
144         default:
145                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
146                      max_link_bw);
147                 max_link_bw = DP_LINK_BW_1_62;
148                 break;
149         }
150         return max_link_bw;
151 }
152
153 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
154 {
155         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
156         u8 source_max, sink_max;
157
158         source_max = intel_dig_port->max_lanes;
159         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
160
161         return min(source_max, sink_max);
162 }
163
164 /*
165  * The units on the numbers in the next two are... bizarre.  Examples will
166  * make it clearer; this one parallels an example in the eDP spec.
167  *
168  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
169  *
170  *     270000 * 1 * 8 / 10 == 216000
171  *
172  * The actual data capacity of that configuration is 2.16Gbit/s, so the
173  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
174  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
175  * 119000.  At 18bpp that's 2142000 kilobits per second.
176  *
177  * Thus the strange-looking division by 10 in intel_dp_link_required, to
178  * get the result in decakilobits instead of kilobits.
179  */
180
/*
 * Bandwidth needed by @pixel_clock (kHz) at @bpp, in decakilobits/s;
 * see the unit discussion in the comment block above.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	/* Round up to a whole decakilobit. */
	return (kilobits + 9) / 10;
}
186
/*
 * Data capacity of @max_lanes at @max_link_clock (kHz), in
 * decakilobits/s. Only 8 of every 10 link bits carry data (8b/10b).
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_rate = max_link_clock * max_lanes;

	return link_rate * 8 / 10;
}
192
193 static enum drm_mode_status
194 intel_dp_mode_valid(struct drm_connector *connector,
195                     struct drm_display_mode *mode)
196 {
197         struct intel_dp *intel_dp = intel_attached_dp(connector);
198         struct intel_connector *intel_connector = to_intel_connector(connector);
199         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
200         int target_clock = mode->clock;
201         int max_rate, mode_rate, max_lanes, max_link_clock;
202         int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
203
204         if (is_edp(intel_dp) && fixed_mode) {
205                 if (mode->hdisplay > fixed_mode->hdisplay)
206                         return MODE_PANEL;
207
208                 if (mode->vdisplay > fixed_mode->vdisplay)
209                         return MODE_PANEL;
210
211                 target_clock = fixed_mode->clock;
212         }
213
214         max_link_clock = intel_dp_max_link_rate(intel_dp);
215         max_lanes = intel_dp_max_lane_count(intel_dp);
216
217         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
218         mode_rate = intel_dp_link_required(target_clock, 18);
219
220         if (mode_rate > max_rate || target_clock > max_dotclk)
221                 return MODE_CLOCK_HIGH;
222
223         if (mode->clock < 10000)
224                 return MODE_CLOCK_LOW;
225
226         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
227                 return MODE_H_ILLEGAL;
228
229         return MODE_OK;
230 }
231
/*
 * Pack up to 4 bytes into one AUX data register value, big-endian:
 * src[0] lands in the most significant byte.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << (24 - i * 8);

	return v;
}
243
/*
 * Unpack one AUX data register value into up to 4 bytes, big-endian:
 * the most significant byte comes out first.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
252
253 static void
254 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
255                                     struct intel_dp *intel_dp);
256 static void
257 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
258                                               struct intel_dp *intel_dp);
259
260 static void pps_lock(struct intel_dp *intel_dp)
261 {
262         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
263         struct intel_encoder *encoder = &intel_dig_port->base;
264         struct drm_device *dev = encoder->base.dev;
265         struct drm_i915_private *dev_priv = dev->dev_private;
266         enum intel_display_power_domain power_domain;
267
268         /*
269          * See vlv_power_sequencer_reset() why we need
270          * a power domain reference here.
271          */
272         power_domain = intel_display_port_aux_power_domain(encoder);
273         intel_display_power_get(dev_priv, power_domain);
274
275         mutex_lock(&dev_priv->pps_mutex);
276 }
277
278 static void pps_unlock(struct intel_dp *intel_dp)
279 {
280         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
281         struct intel_encoder *encoder = &intel_dig_port->base;
282         struct drm_device *dev = encoder->base.dev;
283         struct drm_i915_private *dev_priv = dev->dev_private;
284         enum intel_display_power_domain power_domain;
285
286         mutex_unlock(&dev_priv->pps_mutex);
287
288         power_domain = intel_display_port_aux_power_domain(encoder);
289         intel_display_power_put(dev_priv, power_domain);
290 }
291
292 static void
293 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
294 {
295         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
296         struct drm_device *dev = intel_dig_port->base.base.dev;
297         struct drm_i915_private *dev_priv = dev->dev_private;
298         enum pipe pipe = intel_dp->pps_pipe;
299         bool pll_enabled, release_cl_override = false;
300         enum dpio_phy phy = DPIO_PHY(pipe);
301         enum dpio_channel ch = vlv_pipe_to_channel(pipe);
302         uint32_t DP;
303
304         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
305                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
306                  pipe_name(pipe), port_name(intel_dig_port->port)))
307                 return;
308
309         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
310                       pipe_name(pipe), port_name(intel_dig_port->port));
311
312         /* Preserve the BIOS-computed detected bit. This is
313          * supposed to be read-only.
314          */
315         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
316         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
317         DP |= DP_PORT_WIDTH(1);
318         DP |= DP_LINK_TRAIN_PAT_1;
319
320         if (IS_CHERRYVIEW(dev))
321                 DP |= DP_PIPE_SELECT_CHV(pipe);
322         else if (pipe == PIPE_B)
323                 DP |= DP_PIPEB_SELECT;
324
325         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
326
327         /*
328          * The DPLL for the pipe must be enabled for this to work.
329          * So enable temporarily it if it's not already enabled.
330          */
331         if (!pll_enabled) {
332                 release_cl_override = IS_CHERRYVIEW(dev) &&
333                         !chv_phy_powergate_ch(dev_priv, phy, ch, true);
334
335                 if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
336                                      &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
337                         DRM_ERROR("Failed to force on pll for pipe %c!\n",
338                                   pipe_name(pipe));
339                         return;
340                 }
341         }
342
343         /*
344          * Similar magic as in intel_dp_enable_port().
345          * We _must_ do this port enable + disable trick
346          * to make this power seqeuencer lock onto the port.
347          * Otherwise even VDD force bit won't work.
348          */
349         I915_WRITE(intel_dp->output_reg, DP);
350         POSTING_READ(intel_dp->output_reg);
351
352         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
353         POSTING_READ(intel_dp->output_reg);
354
355         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
356         POSTING_READ(intel_dp->output_reg);
357
358         if (!pll_enabled) {
359                 vlv_force_pll_off(dev, pipe);
360
361                 if (release_cl_override)
362                         chv_phy_powergate_ch(dev_priv, phy, ch, false);
363         }
364 }
365
/*
 * Return the pipe whose panel power sequencer should drive this eDP port,
 * lazily claiming a free one (stealing it from another port if necessary)
 * and kicking it so the hardware locks onto the port.
 * Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	/* candidate pipes: VLV/CHV have power sequencers on pipes A and B */
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/* already assigned? nothing to do */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	/* steal must happen before we record the new owner below */
	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
428
/* Predicate used when scanning pipes for an already-configured PPS. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);
431
432 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
433                                enum pipe pipe)
434 {
435         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
436 }
437
438 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
439                                 enum pipe pipe)
440 {
441         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
442 }
443
/* Accepts any pipe; used as the last-resort vlv_pipe_check. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
449
450 static enum pipe
451 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
452                      enum port port,
453                      vlv_pipe_check pipe_check)
454 {
455         enum pipe pipe;
456
457         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
458                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
459                         PANEL_PORT_SELECT_MASK;
460
461                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
462                         continue;
463
464                 if (!pipe_check(dev_priv, pipe))
465                         continue;
466
467                 return pipe;
468         }
469
470         return INVALID_PIPE;
471 }
472
473 static void
474 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
475 {
476         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
477         struct drm_device *dev = intel_dig_port->base.base.dev;
478         struct drm_i915_private *dev_priv = dev->dev_private;
479         enum port port = intel_dig_port->port;
480
481         lockdep_assert_held(&dev_priv->pps_mutex);
482
483         /* try to find a pipe with this port selected */
484         /* first pick one where the panel is on */
485         intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
486                                                   vlv_pipe_has_pp_on);
487         /* didn't find one? pick one where vdd is on */
488         if (intel_dp->pps_pipe == INVALID_PIPE)
489                 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
490                                                           vlv_pipe_has_vdd_on);
491         /* didn't find one? pick one with just the correct port */
492         if (intel_dp->pps_pipe == INVALID_PIPE)
493                 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
494                                                           vlv_pipe_any);
495
496         /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
497         if (intel_dp->pps_pipe == INVALID_PIPE) {
498                 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
499                               port_name(port));
500                 return;
501         }
502
503         DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
504                       port_name(port), pipe_name(intel_dp->pps_pipe));
505
506         intel_dp_init_panel_power_sequencer(dev, intel_dp);
507         intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
508 }
509
510 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
511 {
512         struct drm_device *dev = dev_priv->dev;
513         struct intel_encoder *encoder;
514
515         if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
516                 return;
517
518         /*
519          * We can't grab pps_mutex here due to deadlock with power_domain
520          * mutex when power_domain functions are called while holding pps_mutex.
521          * That also means that in order to use pps_pipe the code needs to
522          * hold both a power domain reference and pps_mutex, and the power domain
523          * reference get/put must be done while _not_ holding pps_mutex.
524          * pps_{lock,unlock}() do these steps in the correct order, so one
525          * should use them always.
526          */
527
528         for_each_intel_encoder(dev, encoder) {
529                 struct intel_dp *intel_dp;
530
531                 if (encoder->type != INTEL_OUTPUT_EDP)
532                         continue;
533
534                 intel_dp = enc_to_intel_dp(&encoder->base);
535                 intel_dp->pps_pipe = INVALID_PIPE;
536         }
537 }
538
539 static i915_reg_t
540 _pp_ctrl_reg(struct intel_dp *intel_dp)
541 {
542         struct drm_device *dev = intel_dp_to_dev(intel_dp);
543
544         if (IS_BROXTON(dev))
545                 return BXT_PP_CONTROL(0);
546         else if (HAS_PCH_SPLIT(dev))
547                 return PCH_PP_CONTROL;
548         else
549                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
550 }
551
552 static i915_reg_t
553 _pp_stat_reg(struct intel_dp *intel_dp)
554 {
555         struct drm_device *dev = intel_dp_to_dev(intel_dp);
556
557         if (IS_BROXTON(dev))
558                 return BXT_PP_STATUS(0);
559         else if (HAS_PCH_SPLIT(dev))
560                 return PCH_PP_STATUS;
561         else
562                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
563 }
564
565 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
566    This function only applicable when panel PM state is not to be tracked */
567 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
568                               void *unused)
569 {
570         struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
571                                                  edp_notifier);
572         struct drm_device *dev = intel_dp_to_dev(intel_dp);
573         struct drm_i915_private *dev_priv = dev->dev_private;
574
575         if (!is_edp(intel_dp) || code != SYS_RESTART)
576                 return 0;
577
578         pps_lock(intel_dp);
579
580         if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
581                 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
582                 i915_reg_t pp_ctrl_reg, pp_div_reg;
583                 u32 pp_div;
584
585                 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
586                 pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
587                 pp_div = I915_READ(pp_div_reg);
588                 pp_div &= PP_REFERENCE_DIVIDER_MASK;
589
590                 /* 0x1F write to PP_DIV_REG sets max cycle delay */
591                 I915_WRITE(pp_div_reg, pp_div | 0x1F);
592                 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
593                 msleep(intel_dp->panel_power_cycle_delay);
594         }
595
596         pps_unlock(intel_dp);
597
598         return 0;
599 }
600
601 static bool edp_have_panel_power(struct intel_dp *intel_dp)
602 {
603         struct drm_device *dev = intel_dp_to_dev(intel_dp);
604         struct drm_i915_private *dev_priv = dev->dev_private;
605
606         lockdep_assert_held(&dev_priv->pps_mutex);
607
608         if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
609             intel_dp->pps_pipe == INVALID_PIPE)
610                 return false;
611
612         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
613 }
614
615 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
616 {
617         struct drm_device *dev = intel_dp_to_dev(intel_dp);
618         struct drm_i915_private *dev_priv = dev->dev_private;
619
620         lockdep_assert_held(&dev_priv->pps_mutex);
621
622         if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
623             intel_dp->pps_pipe == INVALID_PIPE)
624                 return false;
625
626         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
627 }
628
629 static void
630 intel_dp_check_edp(struct intel_dp *intel_dp)
631 {
632         struct drm_device *dev = intel_dp_to_dev(intel_dp);
633         struct drm_i915_private *dev_priv = dev->dev_private;
634
635         if (!is_edp(intel_dp))
636                 return;
637
638         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
639                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
640                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
641                               I915_READ(_pp_stat_reg(intel_dp)),
642                               I915_READ(_pp_ctrl_reg(intel_dp)));
643         }
644 }
645
/*
 * Wait (irq-driven or by polling, per @has_aux_irq) for the AUX channel
 * to finish its current transaction; returns the last CTL register value
 * read, from which the caller decodes completion/error bits.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C is true once SEND_BUSY clears; as a side effect each evaluation
 * latches the freshest register value into @status. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
669
670 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
671 {
672         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
673         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
674
675         if (index)
676                 return 0;
677
678         /*
679          * The clock divider is based off the hrawclk, and would like to run at
680          * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
681          */
682         return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
683 }
684
685 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
686 {
687         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
688         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
689
690         if (index)
691                 return 0;
692
693         /*
694          * The clock divider is based off the cdclk or PCH rawclk, and would
695          * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
696          * divide by 2000 and use that
697          */
698         if (intel_dig_port->port == PORT_A)
699                 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
700         else
701                 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
702 }
703
704 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
705 {
706         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
707         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
708
709         if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
710                 /* Workaround for non-ULT HSW */
711                 switch (index) {
712                 case 0: return 63;
713                 case 1: return 72;
714                 default: return 0;
715                 }
716         }
717
718         return ilk_get_aux_clock_divider(intel_dp, index);
719 }
720
721 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
722 {
723         /*
724          * SKL doesn't need us to program the AUX clock divider (Hardware will
725          * derive the clock from CDCLK automatically). We still implement the
726          * get_aux_clock_divider vfunc to plug-in into the existing code.
727          */
728         return index ? 0 : 1;
729 }
730
731 static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
732                                      bool has_aux_irq,
733                                      int send_bytes,
734                                      uint32_t aux_clock_divider)
735 {
736         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
737         struct drm_device *dev = intel_dig_port->base.base.dev;
738         uint32_t precharge, timeout;
739
740         if (IS_GEN6(dev))
741                 precharge = 3;
742         else
743                 precharge = 5;
744
745         if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
746                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
747         else
748                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
749
750         return DP_AUX_CH_CTL_SEND_BUSY |
751                DP_AUX_CH_CTL_DONE |
752                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
753                DP_AUX_CH_CTL_TIME_OUT_ERROR |
754                timeout |
755                DP_AUX_CH_CTL_RECEIVE_ERROR |
756                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
757                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
758                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
759 }
760
761 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
762                                       bool has_aux_irq,
763                                       int send_bytes,
764                                       uint32_t unused)
765 {
766         return DP_AUX_CH_CTL_SEND_BUSY |
767                DP_AUX_CH_CTL_DONE |
768                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
769                DP_AUX_CH_CTL_TIME_OUT_ERROR |
770                DP_AUX_CH_CTL_TIME_OUT_1600us |
771                DP_AUX_CH_CTL_RECEIVE_ERROR |
772                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
773                DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
774                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
775 }
776
/*
 * intel_dp_aux_ch - perform a single raw AUX channel transaction
 * @intel_dp: the DP port to transact on
 * @send: bytes to transmit (header + payload, already packed)
 * @send_bytes: length of @send; must be <= 20 (only 5 data registers)
 * @recv: buffer receiving the reply bytes
 * @recv_size: capacity of @recv; must be <= 20
 *
 * Returns the number of reply bytes received on success, or a negative
 * errno: -EBUSY (channel busy or forbidden reply size), -E2BIG (request
 * too large), -EIO (receive error) or -ETIMEDOUT (AUX timeout, which is
 * normal for a disconnected sink).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Warn only once per distinct stuck status so a wedged
		 * channel doesn't flood the kernel log. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Walk the platform's AUX clock dividers until a transaction
	 * succeeds; get_aux_clock_divider() returns 0 when there are no
	 * more dividers to try. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
948
949 #define BARE_ADDRESS_SIZE       3
950 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * intel_dp_aux_transfer - drm_dp_aux .transfer hook
 *
 * Packs a drm_dp_aux_msg into the 20-byte wire format consumed by
 * intel_dp_aux_ch(), runs the transaction and unpacks the reply into
 * the message. Returns the payload size on success or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Build the 4-byte AUX header: request, 20-bit address, size-1. */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-size write is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
		else
			WARN_ON(msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* First reply byte carries the AUX reply code. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1023
1024 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1025                                        enum port port)
1026 {
1027         switch (port) {
1028         case PORT_B:
1029         case PORT_C:
1030         case PORT_D:
1031                 return DP_AUX_CH_CTL(port);
1032         default:
1033                 MISSING_CASE(port);
1034                 return DP_AUX_CH_CTL(PORT_B);
1035         }
1036 }
1037
1038 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1039                                         enum port port, int index)
1040 {
1041         switch (port) {
1042         case PORT_B:
1043         case PORT_C:
1044         case PORT_D:
1045                 return DP_AUX_CH_DATA(port, index);
1046         default:
1047                 MISSING_CASE(port);
1048                 return DP_AUX_CH_DATA(PORT_B, index);
1049         }
1050 }
1051
1052 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1053                                        enum port port)
1054 {
1055         switch (port) {
1056         case PORT_A:
1057                 return DP_AUX_CH_CTL(port);
1058         case PORT_B:
1059         case PORT_C:
1060         case PORT_D:
1061                 return PCH_DP_AUX_CH_CTL(port);
1062         default:
1063                 MISSING_CASE(port);
1064                 return DP_AUX_CH_CTL(PORT_A);
1065         }
1066 }
1067
1068 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1069                                         enum port port, int index)
1070 {
1071         switch (port) {
1072         case PORT_A:
1073                 return DP_AUX_CH_DATA(port, index);
1074         case PORT_B:
1075         case PORT_C:
1076         case PORT_D:
1077                 return PCH_DP_AUX_CH_DATA(port, index);
1078         default:
1079                 MISSING_CASE(port);
1080                 return DP_AUX_CH_DATA(PORT_A, index);
1081         }
1082 }
1083
1084 /*
1085  * On SKL we don't have Aux for port E so we rely
1086  * on VBT to set a proper alternate aux channel.
1087  */
1088 static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1089 {
1090         const struct ddi_vbt_port_info *info =
1091                 &dev_priv->vbt.ddi_port_info[PORT_E];
1092
1093         switch (info->alternate_aux_channel) {
1094         case DP_AUX_A:
1095                 return PORT_A;
1096         case DP_AUX_B:
1097                 return PORT_B;
1098         case DP_AUX_C:
1099                 return PORT_C;
1100         case DP_AUX_D:
1101                 return PORT_D;
1102         default:
1103                 MISSING_CASE(info->alternate_aux_channel);
1104                 return PORT_A;
1105         }
1106 }
1107
1108 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1109                                        enum port port)
1110 {
1111         if (port == PORT_E)
1112                 port = skl_porte_aux_port(dev_priv);
1113
1114         switch (port) {
1115         case PORT_A:
1116         case PORT_B:
1117         case PORT_C:
1118         case PORT_D:
1119                 return DP_AUX_CH_CTL(port);
1120         default:
1121                 MISSING_CASE(port);
1122                 return DP_AUX_CH_CTL(PORT_A);
1123         }
1124 }
1125
1126 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1127                                         enum port port, int index)
1128 {
1129         if (port == PORT_E)
1130                 port = skl_porte_aux_port(dev_priv);
1131
1132         switch (port) {
1133         case PORT_A:
1134         case PORT_B:
1135         case PORT_C:
1136         case PORT_D:
1137                 return DP_AUX_CH_DATA(port, index);
1138         default:
1139                 MISSING_CASE(port);
1140                 return DP_AUX_CH_DATA(PORT_A, index);
1141         }
1142 }
1143
1144 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1145                                          enum port port)
1146 {
1147         if (INTEL_INFO(dev_priv)->gen >= 9)
1148                 return skl_aux_ctl_reg(dev_priv, port);
1149         else if (HAS_PCH_SPLIT(dev_priv))
1150                 return ilk_aux_ctl_reg(dev_priv, port);
1151         else
1152                 return g4x_aux_ctl_reg(dev_priv, port);
1153 }
1154
1155 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1156                                           enum port port, int index)
1157 {
1158         if (INTEL_INFO(dev_priv)->gen >= 9)
1159                 return skl_aux_data_reg(dev_priv, port, index);
1160         else if (HAS_PCH_SPLIT(dev_priv))
1161                 return ilk_aux_data_reg(dev_priv, port, index);
1162         else
1163                 return g4x_aux_data_reg(dev_priv, port, index);
1164 }
1165
1166 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1167 {
1168         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1169         enum port port = dp_to_dig_port(intel_dp)->port;
1170         int i;
1171
1172         intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1173         for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1174                 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1175 }
1176
/*
 * intel_dp_aux_fini - tear down the AUX channel
 *
 * Counterpart of intel_dp_aux_init(): unregisters the drm_dp_aux and
 * frees the kasprintf()'d channel name.
 */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}
1183
1184 static int
1185 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1186 {
1187         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1188         enum port port = intel_dig_port->port;
1189         int ret;
1190
1191         intel_aux_reg_init(intel_dp);
1192
1193         intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1194         if (!intel_dp->aux.name)
1195                 return -ENOMEM;
1196
1197         intel_dp->aux.dev = connector->base.kdev;
1198         intel_dp->aux.transfer = intel_dp_aux_transfer;
1199
1200         DRM_DEBUG_KMS("registering %s bus for %s\n",
1201                       intel_dp->aux.name,
1202                       connector->base.kdev->kobj.name);
1203
1204         ret = drm_dp_aux_register(&intel_dp->aux);
1205         if (ret < 0) {
1206                 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1207                           intel_dp->aux.name, ret);
1208                 kfree(intel_dp->aux.name);
1209                 return ret;
1210         }
1211
1212         return 0;
1213 }
1214
/*
 * intel_dp_connector_unregister - connector unregister hook for DP
 *
 * Tears down the AUX channel first, then runs the common intel
 * connector unregistration.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	intel_dp_aux_fini(intel_dp);
	intel_connector_unregister(intel_connector);
}
1223
1224 static int
1225 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1226 {
1227         if (intel_dp->num_sink_rates) {
1228                 *sink_rates = intel_dp->sink_rates;
1229                 return intel_dp->num_sink_rates;
1230         }
1231
1232         *sink_rates = default_rates;
1233
1234         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1235 }
1236
1237 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1238 {
1239         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1240         struct drm_device *dev = dig_port->base.base.dev;
1241
1242         /* WaDisableHBR2:skl */
1243         if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1244                 return false;
1245
1246         if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1247             (INTEL_INFO(dev)->gen >= 9))
1248                 return true;
1249         else
1250                 return false;
1251 }
1252
1253 static int
1254 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1255 {
1256         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1257         struct drm_device *dev = dig_port->base.base.dev;
1258         int size;
1259
1260         if (IS_BROXTON(dev)) {
1261                 *source_rates = bxt_rates;
1262                 size = ARRAY_SIZE(bxt_rates);
1263         } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1264                 *source_rates = skl_rates;
1265                 size = ARRAY_SIZE(skl_rates);
1266         } else {
1267                 *source_rates = default_rates;
1268                 size = ARRAY_SIZE(default_rates);
1269         }
1270
1271         /* This depends on the fact that 5.4 is last value in the array */
1272         if (!intel_dp_source_supports_hbr2(intel_dp))
1273                 size--;
1274
1275         return size;
1276 }
1277
1278 static void
1279 intel_dp_set_clock(struct intel_encoder *encoder,
1280                    struct intel_crtc_state *pipe_config)
1281 {
1282         struct drm_device *dev = encoder->base.dev;
1283         const struct dp_link_dpll *divisor = NULL;
1284         int i, count = 0;
1285
1286         if (IS_G4X(dev)) {
1287                 divisor = gen4_dpll;
1288                 count = ARRAY_SIZE(gen4_dpll);
1289         } else if (HAS_PCH_SPLIT(dev)) {
1290                 divisor = pch_dpll;
1291                 count = ARRAY_SIZE(pch_dpll);
1292         } else if (IS_CHERRYVIEW(dev)) {
1293                 divisor = chv_dpll;
1294                 count = ARRAY_SIZE(chv_dpll);
1295         } else if (IS_VALLEYVIEW(dev)) {
1296                 divisor = vlv_dpll;
1297                 count = ARRAY_SIZE(vlv_dpll);
1298         }
1299
1300         if (divisor && count) {
1301                 for (i = 0; i < count; i++) {
1302                         if (pipe_config->port_clock == divisor[i].clock) {
1303                                 pipe_config->dpll = divisor[i].dpll;
1304                                 pipe_config->clock_set = true;
1305                                 break;
1306                         }
1307                 }
1308         }
1309 }
1310
1311 static int intersect_rates(const int *source_rates, int source_len,
1312                            const int *sink_rates, int sink_len,
1313                            int *common_rates)
1314 {
1315         int i = 0, j = 0, k = 0;
1316
1317         while (i < source_len && j < sink_len) {
1318                 if (source_rates[i] == sink_rates[j]) {
1319                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1320                                 return k;
1321                         common_rates[k] = source_rates[i];
1322                         ++k;
1323                         ++i;
1324                         ++j;
1325                 } else if (source_rates[i] < sink_rates[j]) {
1326                         ++i;
1327                 } else {
1328                         ++j;
1329                 }
1330         }
1331         return k;
1332 }
1333
/*
 * Fill @common_rates with the link rates supported by both source and
 * sink; returns how many were found.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *src_rates, *snk_rates;
	int n_src, n_snk;

	n_snk = intel_dp_sink_rates(intel_dp, &snk_rates);
	n_src = intel_dp_source_rates(intel_dp, &src_rates);

	return intersect_rates(src_rates, n_src,
			       snk_rates, n_snk,
			       common_rates);
}
1347
/*
 * Format @nelem integers from @array into @str as a comma-separated
 * list, stopping silently if the buffer fills up.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	const char *sep = "";
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int written = snprintf(str, len, "%s%d", sep, array[i]);

		if (written >= len)
			return; /* truncated; keep what fit */

		str += written;
		len -= written;
		sep = ", ";
	}
}
1363
1364 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1365 {
1366         const int *source_rates, *sink_rates;
1367         int source_len, sink_len, common_len;
1368         int common_rates[DP_MAX_SUPPORTED_RATES];
1369         char str[128]; /* FIXME: too big for stack? */
1370
1371         if ((drm_debug & DRM_UT_KMS) == 0)
1372                 return;
1373
1374         source_len = intel_dp_source_rates(intel_dp, &source_rates);
1375         snprintf_int_array(str, sizeof(str), source_rates, source_len);
1376         DRM_DEBUG_KMS("source rates: %s\n", str);
1377
1378         sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1379         snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1380         DRM_DEBUG_KMS("sink rates: %s\n", str);
1381
1382         common_len = intel_dp_common_rates(intel_dp, common_rates);
1383         snprintf_int_array(str, sizeof(str), common_rates, common_len);
1384         DRM_DEBUG_KMS("common rates: %s\n", str);
1385 }
1386
1387 static int rate_to_index(int find, const int *rates)
1388 {
1389         int i = 0;
1390
1391         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1392                 if (find == rates[i])
1393                         break;
1394
1395         return i;
1396 }
1397
/*
 * intel_dp_max_link_rate - highest link rate common to source and sink
 *
 * Returns the rate in kHz, or 162000 as a safe fallback if no common
 * rates exist (which would be a driver or sink bug).
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	/*
	 * rates[] was zero-initialized and filled from index 0, so
	 * rate_to_index(0, ...) finds the first unused slot, i.e. the
	 * number of valid entries; the entry just before it is the
	 * highest rate since the common list is sorted ascending.
	 */
	return rates[rate_to_index(0, rates) - 1];
}
1410
/*
 * intel_dp_rate_select - convert a link rate to the sink rate-table index
 *
 * Returns the index of @rate within the sink-reported rate table, for
 * programming the sink's rate-select field.
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1415
1416 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1417                            uint8_t *link_bw, uint8_t *rate_select)
1418 {
1419         if (intel_dp->num_sink_rates) {
1420                 *link_bw = 0;
1421                 *rate_select =
1422                         intel_dp_rate_select(intel_dp, port_clock);
1423         } else {
1424                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1425                 *rate_select = 0;
1426         }
1427 }
1428
1429 bool
1430 intel_dp_compute_config(struct intel_encoder *encoder,
1431                         struct intel_crtc_state *pipe_config)
1432 {
1433         struct drm_device *dev = encoder->base.dev;
1434         struct drm_i915_private *dev_priv = dev->dev_private;
1435         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1436         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1437         enum port port = dp_to_dig_port(intel_dp)->port;
1438         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1439         struct intel_connector *intel_connector = intel_dp->attached_connector;
1440         int lane_count, clock;
1441         int min_lane_count = 1;
1442         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1443         /* Conveniently, the link BW constants become indices with a shift...*/
1444         int min_clock = 0;
1445         int max_clock;
1446         int bpp, mode_rate;
1447         int link_avail, link_clock;
1448         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1449         int common_len;
1450         uint8_t link_bw, rate_select;
1451
1452         common_len = intel_dp_common_rates(intel_dp, common_rates);
1453
1454         /* No common link rates between source and sink */
1455         WARN_ON(common_len <= 0);
1456
1457         max_clock = common_len - 1;
1458
1459         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1460                 pipe_config->has_pch_encoder = true;
1461
1462         pipe_config->has_dp_encoder = true;
1463         pipe_config->has_drrs = false;
1464         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1465
1466         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1467                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1468                                        adjusted_mode);
1469
1470                 if (INTEL_INFO(dev)->gen >= 9) {
1471                         int ret;
1472                         ret = skl_update_scaler_crtc(pipe_config);
1473                         if (ret)
1474                                 return ret;
1475                 }
1476
1477                 if (HAS_GMCH_DISPLAY(dev))
1478                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1479                                                  intel_connector->panel.fitting_mode);
1480                 else
1481                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1482                                                 intel_connector->panel.fitting_mode);
1483         }
1484
1485         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1486                 return false;
1487
1488         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1489                       "max bw %d pixel clock %iKHz\n",
1490                       max_lane_count, common_rates[max_clock],
1491                       adjusted_mode->crtc_clock);
1492
1493         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1494          * bpc in between. */
1495         bpp = pipe_config->pipe_bpp;
1496         if (is_edp(intel_dp)) {
1497
1498                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1499                 if (intel_connector->base.display_info.bpc == 0 &&
1500                         (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
1501                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1502                                       dev_priv->vbt.edp.bpp);
1503                         bpp = dev_priv->vbt.edp.bpp;
1504                 }
1505
1506                 /*
1507                  * Use the maximum clock and number of lanes the eDP panel
1508                  * advertizes being capable of. The panels are generally
1509                  * designed to support only a single clock and lane
1510                  * configuration, and typically these values correspond to the
1511                  * native resolution of the panel.
1512                  */
1513                 min_lane_count = max_lane_count;
1514                 min_clock = max_clock;
1515         }
1516
1517         for (; bpp >= 6*3; bpp -= 2*3) {
1518                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1519                                                    bpp);
1520
1521                 for (clock = min_clock; clock <= max_clock; clock++) {
1522                         for (lane_count = min_lane_count;
1523                                 lane_count <= max_lane_count;
1524                                 lane_count <<= 1) {
1525
1526                                 link_clock = common_rates[clock];
1527                                 link_avail = intel_dp_max_data_rate(link_clock,
1528                                                                     lane_count);
1529
1530                                 if (mode_rate <= link_avail) {
1531                                         goto found;
1532                                 }
1533                         }
1534                 }
1535         }
1536
1537         return false;
1538
1539 found:
1540         if (intel_dp->color_range_auto) {
1541                 /*
1542                  * See:
1543                  * CEA-861-E - 5.1 Default Encoding Parameters
1544                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1545                  */
1546                 pipe_config->limited_color_range =
1547                         bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1548         } else {
1549                 pipe_config->limited_color_range =
1550                         intel_dp->limited_color_range;
1551         }
1552
1553         pipe_config->lane_count = lane_count;
1554
1555         pipe_config->pipe_bpp = bpp;
1556         pipe_config->port_clock = common_rates[clock];
1557
1558         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1559                               &link_bw, &rate_select);
1560
1561         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1562                       link_bw, rate_select, pipe_config->lane_count,
1563                       pipe_config->port_clock, bpp);
1564         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1565                       mode_rate, link_avail);
1566
1567         intel_link_compute_m_n(bpp, lane_count,
1568                                adjusted_mode->crtc_clock,
1569                                pipe_config->port_clock,
1570                                &pipe_config->dp_m_n);
1571
1572         if (intel_connector->panel.downclock_mode != NULL &&
1573                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1574                         pipe_config->has_drrs = true;
1575                         intel_link_compute_m_n(bpp, lane_count,
1576                                 intel_connector->panel.downclock_mode->clock,
1577                                 pipe_config->port_clock,
1578                                 &pipe_config->dp_m2_n2);
1579         }
1580
1581         if (!HAS_DDI(dev))
1582                 intel_dp_set_clock(encoder, pipe_config);
1583
1584         return true;
1585 }
1586
/*
 * intel_dp_set_link_params - latch the computed link rate and lane count
 * from the crtc state onto the intel_dp, for later use by the port.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1593
/*
 * Compute the value for the DP port control register (cached in
 * intel_dp->DP) ahead of enabling the port: lane count, default
 * vswing/pre-emphasis, sync polarity, enhanced framing and pipe
 * selection.  Only the CPT TRANS_DP_CTL register is actually written
 * here; the port register itself is written later in the enable
 * sequence.  Also caches the link parameters from the CRTC state.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* gen7 CPU eDP: polarity and pipe select live in DP_A itself */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT PCH: enhanced framing is in TRANS_DP_CTL, not the port reg */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX/CPU style register layout (also VLV/CHV) */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1675
/*
 * Mask/value pairs for wait_panel_status(): the PP_STATUS bits to poll
 * and the values they must reach for the panel to be considered fully
 * on, fully off, or idle after a power cycle, respectively.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1684
/*
 * Poll the panel power status register until (status & mask) == value,
 * giving up (with an error message, but no hard failure) after 5
 * seconds.  Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5 s timeout, polling every 10 ms */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
		      5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
1711
/* Wait for the power sequencer to report the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1717
/* Wait for the power sequencer to report the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1723
/*
 * Enforce the panel power-cycle delay: make sure at least
 * panel_power_cycle_delay ms have elapsed since the panel was last
 * powered off before letting it be powered on again, then wait for the
 * sequencer to reach its off-idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. Only sleep for the portion of the delay not yet elapsed. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1744
/*
 * Honour the panel's power-on-to-backlight-on delay, measured from the
 * last time panel power was enabled.
 */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1750
/*
 * Honour the backlight-off delay, measured from the last time the
 * backlight was disabled.
 */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1756
/* Read the current pp_control value, unlocking the register if it
 * is locked.  Caller must hold pps_mutex.
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/* NOTE(review): Broxton is skipped here, presumably because its
	 * PP_CONTROL has no write-protect key field — confirm against BSpec. */
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1776
/*
 * Force the panel VDD rail on so the AUX channel can be used before the
 * panel is fully powered.
 *
 * Returns true if VDD was not already requested by this encoder, i.e.
 * the caller is responsible for the balancing edp_panel_vdd_off().
 *
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* Keep VDD from being dropped by a pending delayed-off work item */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* VDD already on in hardware: nothing more to program */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1834
/*
 * Locked wrapper around edp_panel_vdd_on().  Warns if VDD had already
 * been requested, since that indicates an unbalanced on/off sequence.
 *
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
	     port_name(dp_to_dig_port(intel_dp)->port));
}
1856
1857 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1858 {
1859         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1860         struct drm_i915_private *dev_priv = dev->dev_private;
1861         struct intel_digital_port *intel_dig_port =
1862                 dp_to_dig_port(intel_dp);
1863         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1864         enum intel_display_power_domain power_domain;
1865         u32 pp;
1866         i915_reg_t pp_stat_reg, pp_ctrl_reg;
1867
1868         lockdep_assert_held(&dev_priv->pps_mutex);
1869
1870         WARN_ON(intel_dp->want_panel_vdd);
1871
1872         if (!edp_have_panel_vdd(intel_dp))
1873                 return;
1874
1875         DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1876                       port_name(intel_dig_port->port));
1877
1878         pp = ironlake_get_pp_control(intel_dp);
1879         pp &= ~EDP_FORCE_VDD;
1880
1881         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1882         pp_stat_reg = _pp_stat_reg(intel_dp);
1883
1884         I915_WRITE(pp_ctrl_reg, pp);
1885         POSTING_READ(pp_ctrl_reg);
1886
1887         /* Make sure sequencer is idle before allowing subsequent activity */
1888         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1889         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1890
1891         if ((pp & POWER_TARGET_ON) == 0)
1892                 intel_dp->panel_power_off_time = ktime_get_boottime();
1893
1894         power_domain = intel_display_port_aux_power_domain(intel_encoder);
1895         intel_display_power_put(dev_priv, power_domain);
1896 }
1897
/*
 * Delayed-work handler that turns VDD off, unless a new VDD request
 * arrived while the work was queued (want_panel_vdd set again).
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
1908
/* Schedule a deferred VDD-off via edp_panel_vdd_work(). */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
1921
/*
 * Release a VDD request: either synchronously turn VDD off now, or
 * schedule the deferred-off work so VDD stays up across a burst of AUX
 * operations.  Warns if VDD was not actually requested.
 *
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
	     port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
1947
/*
 * Turn the eDP panel power on via the power sequencer and wait until
 * the sequencer reports the panel fully on.  No-op (with a WARN) if the
 * panel already has power.  Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Respect the minimum power-cycle time before powering back on */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for wait_backlight_on() */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
1995
/* Locked wrapper around edp_panel_on(); no-op for non-eDP ports. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_on(intel_dp);
		pps_unlock(intel_dp);
	}
}
2005
2006
/*
 * Turn the eDP panel power off, clearing the power target, panel reset,
 * VDD force and backlight enable bits in one write, then wait for the
 * sequencer to report the panel off and drop the AUX power domain
 * reference.  Caller must hold pps_mutex and have VDD requested.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Timestamp for wait_panel_power_cycle() */
	intel_dp->panel_power_off_time = ktime_get_boottime();
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2048
/* Locked wrapper around edp_panel_off(); no-op for non-eDP ports. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_off(intel_dp);
		pps_unlock(intel_dp);
	}
}
2058
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	/* Set the backlight-enable bit in the panel power control register */
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2088
2089 /* Enable backlight PWM and backlight PP control. */
2090 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2091 {
2092         if (!is_edp(intel_dp))
2093                 return;
2094
2095         DRM_DEBUG_KMS("\n");
2096
2097         intel_panel_enable_backlight(intel_dp->attached_connector);
2098         _intel_edp_backlight_on(intel_dp);
2099 }
2100
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	/* Clear the backlight-enable bit in the panel power control register */
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record the off time and honour the backlight-off delay */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2127
2128 /* Disable backlight PP control and backlight PWM. */
2129 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2130 {
2131         if (!is_edp(intel_dp))
2132                 return;
2133
2134         DRM_DEBUG_KMS("\n");
2135
2136         _intel_edp_backlight_off(intel_dp);
2137         intel_panel_disable_backlight(intel_dp->attached_connector);
2138 }
2139
2140 /*
2141  * Hook for controlling the panel power control backlight through the bl_power
2142  * sysfs attribute. Take care to handle multiple calls.
2143  */
2144 static void intel_edp_backlight_power(struct intel_connector *connector,
2145                                       bool enable)
2146 {
2147         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2148         bool is_enabled;
2149
2150         pps_lock(intel_dp);
2151         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2152         pps_unlock(intel_dp);
2153
2154         if (is_enabled == enable)
2155                 return;
2156
2157         DRM_DEBUG_KMS("panel power control backlight %s\n",
2158                       enable ? "enable" : "disable");
2159
2160         if (enable)
2161                 _intel_edp_backlight_on(intel_dp);
2162         else
2163                 _intel_edp_backlight_off(intel_dp);
2164 }
2165
/*
 * State assertion: warn if the DP port enable bit does not match the
 * expected state.
 */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2178
/*
 * State assertion: warn if the eDP PLL enable bit in DP_A does not
 * match the expected state.
 */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2189
/*
 * Enable the CPU eDP PLL in DP_A: first program the frequency select
 * bits, then set the enable bit in a second write.  Must be called with
 * the pipe, port and PLL all disabled.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      crtc->config->port_clock);

	/* Select the PLL frequency from the committed port clock */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN5(dev_priv))
		intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2229
/*
 * Disable the CPU eDP PLL in DP_A.  Must be called with the pipe and
 * port already disabled but the PLL still enabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2248
2249 /* If the sink supports it, try to set the power state appropriately */
2250 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2251 {
2252         int ret, i;
2253
2254         /* Should have a valid DPCD by this point */
2255         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2256                 return;
2257
2258         if (mode != DRM_MODE_DPMS_ON) {
2259                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2260                                          DP_SET_POWER_D3);
2261         } else {
2262                 /*
2263                  * When turning on, we need to retry for 1ms to give the sink
2264                  * time to wake up.
2265                  */
2266                 for (i = 0; i < 3; i++) {
2267                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2268                                                  DP_SET_POWER_D0);
2269                         if (ret == 1)
2270                                 break;
2271                         msleep(1);
2272                 }
2273         }
2274
2275         if (ret != 1)
2276                 DRM_DEBUG_KMS("failed to %s sink power state\n",
2277                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2278 }
2279
/*
 * Read the current hardware state of the DP port.  Returns true and
 * fills *pipe when the port is enabled; returns false if the port is
 * off, the power domain is gated, or (on CPT) no transcoder maps to
 * this port.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;
	bool ret;

	/* Only probe the register if the power well is already up */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	/* Decode the pipe from whichever register layout this platform uses */
	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the pipe<->port mapping lives in TRANS_DP_CTL */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
2332
/*
 * Read back the DP-related pipe configuration from hardware into
 * pipe_config: sync polarity, audio, color range, lane count, M/N
 * values, port clock (port A only) and the derived dotclock.  Also
 * applies a VBT bpp fixup for eDP (see comment below).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* Sync polarity lives in TRANS_DP_CTL on CPT, in the port reg otherwise */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* CPU eDP: the link rate is encoded in the DP_A PLL frequency bits */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
2415
/*
 * Encoder disable hook: tear down audio and PSR, run the eDP shutdown
 * sequence (backlight off, sink to D3, panel power off) with VDD held
 * so AUX still works, and on pre-ILK take the port down before the pipe.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2439
2440 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2441 {
2442         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2443         enum port port = dp_to_dig_port(intel_dp)->port;
2444
2445         intel_dp_link_down(intel_dp);
2446
2447         /* Only ilk+ has port A */
2448         if (port == PORT_A)
2449                 ironlake_edp_pll_off(intel_dp);
2450 }
2451
2452 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2453 {
2454         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2455
2456         intel_dp_link_down(intel_dp);
2457 }
2458
/*
 * CHV post-disable hook: bring the link down, then assert the PHY data
 * lane soft reset under the sideband lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_dp_link_down(intel_dp);

	/* sideband register access is serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	mutex_unlock(&dev_priv->sb_lock);
}
2474
/*
 * Program the link training pattern selected by @dp_train_pat.
 *
 * On DDI platforms the pattern (and scrambling disable) is written
 * directly to DP_TP_CTL here. On pre-DDI platforms only the caller-owned
 * port register value *DP is updated; the caller is responsible for
 * writing it to the hardware.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* IVB port A and CPT ports use the _CPT training encoding */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* no TPS3 encoding here; fall back to pattern 2 */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* only CHV has a TPS3 encoding on this path */
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2558
/*
 * Turn the DP port on, with training pattern 1 pre-programmed in the
 * port register. Uses a two-step register write: first configure without
 * DP_PORT_EN, then a second write with DP_PORT_EN set (see comment
 * below for why).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2586
/*
 * Common encoder enable path: bind the power sequencer (VLV/CHV), turn
 * the port on, power up the eDP panel, wait for the PHY lanes, then run
 * link training and finally enable audio.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;

	/* the port must still be off when we get here */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* claim the pipe's panel power sequencer for this port */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* power the panel up while holding vdd across the sequence */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		/* CHV: mask of lanes not used by this link's lane count */
		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder);
	}
}
2632
2633 static void g4x_enable_dp(struct intel_encoder *encoder)
2634 {
2635         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2636
2637         intel_enable_dp(encoder);
2638         intel_edp_backlight_on(intel_dp);
2639 }
2640
2641 static void vlv_enable_dp(struct intel_encoder *encoder)
2642 {
2643         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2644
2645         intel_edp_backlight_on(intel_dp);
2646         intel_psr_enable(intel_dp);
2647 }
2648
2649 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2650 {
2651         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2652         enum port port = dp_to_dig_port(intel_dp)->port;
2653
2654         intel_dp_prepare(encoder);
2655
2656         /* Only ilk+ has port A */
2657         if (port == PORT_A)
2658                 ironlake_edp_pll_on(intel_dp);
2659 }
2660
/*
 * Logically disconnect @intel_dp from the power sequencer it currently
 * owns: sync vdd off, clear the port select in the pipe's PP_ON_DELAYS
 * register, and mark pps_pipe invalid.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* vdd must be off before the sequencer can be handed over */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power seqeuencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power seqeuencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2686
/*
 * Detach @pipe's power sequencer from whichever eDP port currently owns
 * it, so the caller can take it over. Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* only pipes A and B may own a power sequencer here */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;
		enum port port;

		/* only eDP encoders hold on to a power sequencer */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* stealing from an active port would indicate a bookkeeping bug */
		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2722
/*
 * Bind the power sequencer of the pipe this eDP port is driving: detach
 * any sequencer the port previously held, steal the target pipe's
 * sequencer from other ports if necessary, then (re)initialize the PPS
 * state and registers. No-op for non-eDP ports or when already bound to
 * the right pipe. Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* already using the right sequencer; nothing to do */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2763
/* VLV pre-enable hook: set up the PHY, then run the common DP enable. */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	vlv_phy_pre_encoder_enable(encoder);
	intel_enable_dp(encoder);
}
2770
/* VLV pre-PLL hook: latch port registers, then prime the PHY PLL. */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	intel_dp_prepare(encoder);
	vlv_phy_pre_pll_enable(encoder);
}
2777
/*
 * CHV pre-enable hook: program the PHY, enable DP, then drop the CL2
 * power override.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	chv_phy_pre_encoder_enable(encoder);
	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
2787
/* CHV pre-PLL hook: latch port registers, then prime the PHY PLL. */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	intel_dp_prepare(encoder);
	chv_phy_pre_pll_enable(encoder);
}
2794
/* CHV post-PLL-disable hook: PHY teardown only. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	chv_phy_post_pll_disable(encoder);
}
2799
2800 /*
2801  * Fetch AUX CH registers 0x202 - 0x207 which contain
2802  * link status information
2803  */
2804 bool
2805 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2806 {
2807         return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
2808                                 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2809 }
2810
/* These are source-specific values. */
/*
 * Return the maximum voltage swing level the source supports on this
 * port. Note the branch order matters: BXT must be checked before the
 * generic gen >= 9 case.
 */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* level 3 only with the VBT low-vswing flag on eDP (port A) */
		if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
2834
/*
 * Return the maximum pre-emphasis level the source supports for the
 * given voltage swing, per platform. Higher swing generally allows less
 * pre-emphasis headroom.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
2902
/*
 * Translate the swing/pre-emphasis request in intel_dp->train_set[0]
 * into VLV PHY register values (demph / preemph / uniqtranscale) and
 * program them via vlv_set_phy_signal_level(). The magic values come
 * from tuned hardware tables. Always returns 0; on an unsupported
 * combination nothing is programmed.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
2988
/*
 * Translate the swing/pre-emphasis request in intel_dp->train_set[0]
 * into CHV PHY deemph/margin values and program them via
 * chv_set_phy_signal_level(). The unique transition scale is only
 * enabled for the max-swing/no-pre-emphasis case. Always returns 0; on
 * an unsupported combination nothing is programmed.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}
3071
3072 static uint32_t
3073 gen4_signal_levels(uint8_t train_set)
3074 {
3075         uint32_t        signal_levels = 0;
3076
3077         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3078         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3079         default:
3080                 signal_levels |= DP_VOLTAGE_0_4;
3081                 break;
3082         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3083                 signal_levels |= DP_VOLTAGE_0_6;
3084                 break;
3085         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3086                 signal_levels |= DP_VOLTAGE_0_8;
3087                 break;
3088         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3089                 signal_levels |= DP_VOLTAGE_1_2;
3090                 break;
3091         }
3092         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3093         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3094         default:
3095                 signal_levels |= DP_PRE_EMPHASIS_0;
3096                 break;
3097         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3098                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3099                 break;
3100         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3101                 signal_levels |= DP_PRE_EMPHASIS_6;
3102                 break;
3103         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3104                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3105                 break;
3106         }
3107         return signal_levels;
3108 }
3109
3110 /* Gen6's DP voltage swing and pre-emphasis control */
3111 static uint32_t
3112 gen6_edp_signal_levels(uint8_t train_set)
3113 {
3114         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3115                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3116         switch (signal_levels) {
3117         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3118         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3119                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3120         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3121                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3122         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3123         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3124                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3125         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3126         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3127                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3128         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3129         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3130                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3131         default:
3132                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3133                               "0x%x\n", signal_levels);
3134                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3135         }
3136 }
3137
3138 /* Gen7's DP voltage swing and pre-emphasis control */
3139 static uint32_t
3140 gen7_edp_signal_levels(uint8_t train_set)
3141 {
3142         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3143                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3144         switch (signal_levels) {
3145         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3146                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3147         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3148                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3149         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3150                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3151
3152         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3153                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3154         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3155                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3156
3157         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3158                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3159         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3160                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3161
3162         default:
3163                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3164                               "0x%x\n", signal_levels);
3165                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3166         }
3167 }
3168
/*
 * Compute the platform-specific signal levels for intel_dp->train_set[0]
 * and write them into the port register. On VLV/CHV (and BXT) the
 * levels are programmed through the PHY by the helper instead, so
 * @mask stays 0 and the merge below leaves intel_dp->DP unchanged.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* BXT programs levels entirely in the PHY */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* only meaningful to log when the levels go into the port register */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3215
/*
 * Update the cached port register value (intel_dp->DP) for the requested
 * training pattern via _intel_dp_set_link_train(), then flush it to the
 * hardware.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3229
/*
 * Switch a DDI port's DP_TP_CTL into idle-pattern transmission and,
 * except on PORT_A, wait for the hardware to signal idle-done.
 * No-op on non-DDI platforms.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	/* DP_TP_CTL only exists on DDI platforms */
	if (!HAS_DDI(dev))
		return;

	/* Read-modify-write: replace the link-train field with IDLE */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* 1 ms timeout for DP_TP_STATUS to report idle-done */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3260
/*
 * Tear down the link on a non-DDI DP port: first switch the port to the
 * idle training pattern, then clear DP_PORT_EN (and audio), applying the
 * IBX transcoder-A workaround where required. Finally sleeps for the
 * panel power-down delay and caches the resulting register value in
 * intel_dp->DP.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI ports are torn down by the DDI code, not here */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Port is expected to still be enabled at this point */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/*
	 * Select the idle link-training pattern; the bit layout differs
	 * between CPT / gen7 eDP and the other platforms.
	 */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Now actually disable the port (and any audio output) */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;
}
3329
/*
 * Read and cache the sink's DPCD: receiver capabilities, sink count,
 * PSR/PSR2 capabilities (eDP only), the eDP display-control registers,
 * the supported link-rate table (eDP 1.4+) and downstream-port info.
 *
 * Returns false when the sink should be treated as absent/unusable
 * (AUX failure, no DPCD, or a branch device with zero attached sinks),
 * true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Receiver capability field starts at DPCD address 0x000 */
	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
			     sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
			     &intel_dp->sink_count, 1) < 0)
		return false;

	/*
	 * Sink count can change between short pulse hpd hence
	 * a member variable in intel_dp will track any changes
	 * between short pulse interrupts.
	 */
	intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);

	/*
	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
	 * a dongle is present but no display. Unless we require to know
	 * if a dongle is present or not, we don't need to update
	 * downstream port information. So, an early return here saves
	 * time from performing other operations which are not required.
	 */
	if (!is_edp(intel_dp) && !intel_dp->sink_count)
		return false;

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
				 intel_dp->psr_dpcd,
				 sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 is only considered on gen9+ hardware */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			drm_dp_dpcd_read(&intel_dp->aux,
					 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					 &frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}

		/* Read the eDP Display control capabilities registers */
		memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
		if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
				(drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
						intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
								sizeof(intel_dp->edp_dpcd)))
			DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
					intel_dp->edp_dpcd);
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The rate table is zero-terminated when shorter than max */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3443
3444 static void
3445 intel_dp_probe_oui(struct intel_dp *intel_dp)
3446 {
3447         u8 buf[3];
3448
3449         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3450                 return;
3451
3452         if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3453                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3454                               buf[0], buf[1], buf[2]);
3455
3456         if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3457                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3458                               buf[0], buf[1], buf[2]);
3459 }
3460
3461 static bool
3462 intel_dp_probe_mst(struct intel_dp *intel_dp)
3463 {
3464         u8 buf[1];
3465
3466         if (!i915.enable_dp_mst)
3467                 return false;
3468
3469         if (!intel_dp->can_mst)
3470                 return false;
3471
3472         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3473                 return false;
3474
3475         if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3476                 if (buf[0] & DP_MST_CAP) {
3477                         DRM_DEBUG_KMS("Sink is MST capable\n");
3478                         intel_dp->is_mst = true;
3479                 } else {
3480                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3481                         intel_dp->is_mst = false;
3482                 }
3483         }
3484
3485         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3486         return intel_dp->is_mst;
3487 }
3488
/*
 * Ask the sink to stop CRC calculation (clear DP_TEST_SINK_START) and
 * wait, one vblank per attempt, for its CRC test counter to drain to 0.
 *
 * NOTE(review): the "out" path re-enables IPS unconditionally. This
 * pairs with the hsw_disable_ips() in intel_dp_sink_crc_start(), but
 * also runs on the early error paths where IPS was never disabled —
 * presumably harmless, worth confirming.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	/* Read-modify-write TEST_SINK so only the START bit is cleared */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Poll DP_TEST_SINK_MISC until the CRC count reaches zero */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
3532
/*
 * Prepare the sink for CRC capture: verify DP_TEST_CRC_SUPPORTED, stop
 * any capture already in progress, disable IPS for the duration of the
 * capture (re-enabled by intel_dp_sink_crc_stop()) and then set
 * DP_TEST_SINK_START.
 *
 * Returns 0 on success, -EIO on AUX failure, -ENOTTY when the sink
 * cannot compute CRCs.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A capture may still be running from a previous request */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Undo the IPS disable on failure */
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	/* Give the sink a full frame before the first CRC is expected */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return 0;
}
3567
/*
 * intel_dp_sink_crc - read a frame CRC computed by the sink.
 * @crc: output buffer, 6 bytes starting at DP_TEST_CRC_R_CR.
 *
 * Starts sink CRC calculation, waits (up to 6 vblanks) until the sink
 * reports a non-zero CRC count, reads the CRC registers, then stops
 * calculation again. Returns 0 on success or a negative errno.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Wait for the sink to have computed at least one CRC */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* Always stop the capture, even on the error paths */
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
3608
3609 static bool
3610 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3611 {
3612         return drm_dp_dpcd_read(&intel_dp->aux,
3613                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3614                                        sink_irq_vector, 1) == 1;
3615 }
3616
3617 static bool
3618 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3619 {
3620         int ret;
3621
3622         ret = drm_dp_dpcd_read(&intel_dp->aux,
3623                                              DP_SINK_COUNT_ESI,
3624                                              sink_irq_vector, 14);
3625         if (ret != 14)
3626                 return false;
3627
3628         return true;
3629 }
3630
3631 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
3632 {
3633         uint8_t test_result = DP_TEST_ACK;
3634         return test_result;
3635 }
3636
3637 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
3638 {
3639         uint8_t test_result = DP_TEST_NAK;
3640         return test_result;
3641 }
3642
3643 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
3644 {
3645         uint8_t test_result = DP_TEST_NAK;
3646         struct intel_connector *intel_connector = intel_dp->attached_connector;
3647         struct drm_connector *connector = &intel_connector->base;
3648
3649         if (intel_connector->detect_edid == NULL ||
3650             connector->edid_corrupt ||
3651             intel_dp->aux.i2c_defer_count > 6) {
3652                 /* Check EDID read for NACKs, DEFERs and corruption
3653                  * (DP CTS 1.2 Core r1.1)
3654                  *    4.2.2.4 : Failed EDID read, I2C_NAK
3655                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
3656                  *    4.2.2.6 : EDID corruption detected
3657                  * Use failsafe mode for all cases
3658                  */
3659                 if (intel_dp->aux.i2c_nack_count > 0 ||
3660                         intel_dp->aux.i2c_defer_count > 0)
3661                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
3662                                       intel_dp->aux.i2c_nack_count,
3663                                       intel_dp->aux.i2c_defer_count);
3664                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
3665         } else {
3666                 struct edid *block = intel_connector->detect_edid;
3667
3668                 /* We have to write the checksum
3669                  * of the last block read
3670                  */
3671                 block += intel_connector->detect_edid->extensions;
3672
3673                 if (!drm_dp_dpcd_write(&intel_dp->aux,
3674                                         DP_TEST_EDID_CHECKSUM,
3675                                         &block->checksum,
3676                                         1))
3677                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
3678
3679                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
3680                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
3681         }
3682
3683         /* Set test active flag here so userspace doesn't interrupt things */
3684         intel_dp->compliance_test_active = 1;
3685
3686         return test_result;
3687 }
3688
3689 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
3690 {
3691         uint8_t test_result = DP_TEST_NAK;
3692         return test_result;
3693 }
3694
/*
 * Service a DP automated-compliance interrupt: read DP_TEST_REQUEST,
 * dispatch to the per-test handler, and write the handler's ACK/NAK
 * result to DP_TEST_RESPONSE.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	/* Always report a result, even when reading the request failed */
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
3740
/*
 * Service an MST short-pulse interrupt: read the ESI (event status
 * indicator) block, retrain if channel EQ dropped, hand the event to
 * the MST topology manager, then ack the handled ESI bits back to the
 * sink. Loops (via go_again) while new events keep arriving.
 *
 * Returns the topology manager's result, or -EINVAL when not in MST
 * mode or when the ESI read failed (in which case MST is torn down and
 * a hotplug event is generated).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced event bytes; retry the
				 * 3-byte write up to 3 times on short
				 * transfers. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
3797
/*
 * Check link status on receipt of a hot-plug interrupt (DP spec 5.1.2
 * step 4) and retrain when channel EQ is no longer ok — or always when
 * a LINK_TRAINING compliance test has been requested.
 *
 * Caller must hold connection_mutex (asserted below).
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		DRM_ERROR("Failed to get link status\n");
		return;
	}

	/* Nothing to retrain if the encoder isn't driving an active crtc */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* if link training is requested we should perform it always */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
	    (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
3827
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse -  handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	u8 sink_irq_vector;
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		/* Test requests and CP/sink IRQs are only logged here */
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* The link status check requires connection_mutex */
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	intel_dp_check_link_status(intel_dp);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	return true;
}
3890
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the DPCD: refresh the cached DPCD and
 * classify the sink as connected, disconnected or unknown, handling
 * branch (downstream-port) devices via SINK_COUNT, DDC probing and the
 * downstream-port type fields.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	/* A failed DPCD read means nothing usable is attached */
	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* eDP: a readable DPCD is sufficient to report connected */
	if (is_edp(intel_dp))
		return connector_status_connected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* Pre-1.1 DPCD only exposes the coarse downstream type */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
3938
3939 static enum drm_connector_status
3940 edp_detect(struct intel_dp *intel_dp)
3941 {
3942         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3943         enum drm_connector_status status;
3944
3945         status = intel_panel_detect(dev);
3946         if (status == connector_status_unknown)
3947                 status = connector_status_connected;
3948
3949         return status;
3950 }
3951
3952 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
3953                                        struct intel_digital_port *port)
3954 {
3955         u32 bit;
3956
3957         switch (port->port) {
3958         case PORT_A:
3959                 return true;
3960         case PORT_B:
3961                 bit = SDE_PORTB_HOTPLUG;
3962                 break;
3963         case PORT_C:
3964                 bit = SDE_PORTC_HOTPLUG;
3965                 break;
3966         case PORT_D:
3967                 bit = SDE_PORTD_HOTPLUG;
3968                 break;
3969         default:
3970                 MISSING_CASE(port->port);
3971                 return false;
3972         }
3973
3974         return I915_READ(SDEISR) & bit;
3975 }
3976
3977 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
3978                                        struct intel_digital_port *port)
3979 {
3980         u32 bit;
3981
3982         switch (port->port) {
3983         case PORT_A:
3984                 return true;
3985         case PORT_B:
3986                 bit = SDE_PORTB_HOTPLUG_CPT;
3987                 break;
3988         case PORT_C:
3989                 bit = SDE_PORTC_HOTPLUG_CPT;
3990                 break;
3991         case PORT_D:
3992                 bit = SDE_PORTD_HOTPLUG_CPT;
3993                 break;
3994         case PORT_E:
3995                 bit = SDE_PORTE_HOTPLUG_SPT;
3996                 break;
3997         default:
3998                 MISSING_CASE(port->port);
3999                 return false;
4000         }
4001
4002         return I915_READ(SDEISR) & bit;
4003 }
4004
4005 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4006                                        struct intel_digital_port *port)
4007 {
4008         u32 bit;
4009
4010         switch (port->port) {
4011         case PORT_B:
4012                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4013                 break;
4014         case PORT_C:
4015                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4016                 break;
4017         case PORT_D:
4018                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4019                 break;
4020         default:
4021                 MISSING_CASE(port->port);
4022                 return false;
4023         }
4024
4025         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4026 }
4027
4028 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4029                                         struct intel_digital_port *port)
4030 {
4031         u32 bit;
4032
4033         switch (port->port) {
4034         case PORT_B:
4035                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4036                 break;
4037         case PORT_C:
4038                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4039                 break;
4040         case PORT_D:
4041                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4042                 break;
4043         default:
4044                 MISSING_CASE(port->port);
4045                 return false;
4046         }
4047
4048         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4049 }
4050
4051 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4052                                        struct intel_digital_port *intel_dig_port)
4053 {
4054         struct intel_encoder *intel_encoder = &intel_dig_port->base;
4055         enum port port;
4056         u32 bit;
4057
4058         intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4059         switch (port) {
4060         case PORT_A:
4061                 bit = BXT_DE_PORT_HP_DDIA;
4062                 break;
4063         case PORT_B:
4064                 bit = BXT_DE_PORT_HP_DDIB;
4065                 break;
4066         case PORT_C:
4067                 bit = BXT_DE_PORT_HP_DDIC;
4068                 break;
4069         default:
4070                 MISSING_CASE(port);
4071                 return false;
4072         }
4073
4074         return I915_READ(GEN8_DE_PORT_ISR) & bit;
4075 }
4076
4077 /*
4078  * intel_digital_port_connected - is the specified port connected?
4079  * @dev_priv: i915 private structure
4080  * @port: the port to test
4081  *
4082  * Return %true if @port is connected, %false otherwise.
4083  */
4084 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4085                                          struct intel_digital_port *port)
4086 {
4087         if (HAS_PCH_IBX(dev_priv))
4088                 return ibx_digital_port_connected(dev_priv, port);
4089         else if (HAS_PCH_SPLIT(dev_priv))
4090                 return cpt_digital_port_connected(dev_priv, port);
4091         else if (IS_BROXTON(dev_priv))
4092                 return bxt_digital_port_connected(dev_priv, port);
4093         else if (IS_GM45(dev_priv))
4094                 return gm45_digital_port_connected(dev_priv, port);
4095         else
4096                 return g4x_digital_port_connected(dev_priv, port);
4097 }
4098
4099 static struct edid *
4100 intel_dp_get_edid(struct intel_dp *intel_dp)
4101 {
4102         struct intel_connector *intel_connector = intel_dp->attached_connector;
4103
4104         /* use cached edid if we have one */
4105         if (intel_connector->edid) {
4106                 /* invalid edid */
4107                 if (IS_ERR(intel_connector->edid))
4108                         return NULL;
4109
4110                 return drm_edid_duplicate(intel_connector->edid);
4111         } else
4112                 return drm_get_edid(&intel_connector->base,
4113                                     &intel_dp->aux.ddc);
4114 }
4115
4116 static void
4117 intel_dp_set_edid(struct intel_dp *intel_dp)
4118 {
4119         struct intel_connector *intel_connector = intel_dp->attached_connector;
4120         struct edid *edid;
4121
4122         intel_dp_unset_edid(intel_dp);
4123         edid = intel_dp_get_edid(intel_dp);
4124         intel_connector->detect_edid = edid;
4125
4126         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4127                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4128         else
4129                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4130 }
4131
4132 static void
4133 intel_dp_unset_edid(struct intel_dp *intel_dp)
4134 {
4135         struct intel_connector *intel_connector = intel_dp->attached_connector;
4136
4137         kfree(intel_connector->detect_edid);
4138         intel_connector->detect_edid = NULL;
4139
4140         intel_dp->has_audio = false;
4141 }
4142
/*
 * Full "long HPD pulse" detection: work out whether a sink is present,
 * read its DPCD, switch in/out of MST mode as needed, and cache the EDID
 * for later use by intel_dp_detect() / intel_dp_get_modes().
 */
static void
intel_dp_long_pulse(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	/* The AUX transactions below need the port's AUX power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_connected) {
		/* Sink is gone: forget any pending compliance-test state. */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		/* Tear down the MST topology if we were in MST mode. */
		if (intel_dp->is_mst) {
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst,
				      intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	} else if (connector->status == connector_status_connected) {
		/*
		 * If display was connected already and is still connected
		 * check links status, there has been known issues of
		 * link loss triggerring long pulse!!!!
		 */
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
		intel_dp_check_link_status(intel_dp);
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);

	status = connector_status_connected;
	/* Tell intel_dp_detect() it need not redo the full probe. */
	intel_dp->detect_done = true;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	/* Keep the cached EDID only while connected (or still in MST). */
	if ((status != connector_status_connected) &&
	    (intel_dp->is_mst == false))
		intel_dp_unset_edid(intel_dp);

	intel_display_power_put(to_i915(dev), power_domain);
	return;
}
4246
4247 static enum drm_connector_status
4248 intel_dp_detect(struct drm_connector *connector, bool force)
4249 {
4250         struct intel_dp *intel_dp = intel_attached_dp(connector);
4251         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4252         struct intel_encoder *intel_encoder = &intel_dig_port->base;
4253         struct intel_connector *intel_connector = to_intel_connector(connector);
4254
4255         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4256                       connector->base.id, connector->name);
4257
4258         if (intel_dp->is_mst) {
4259                 /* MST devices are disconnected from a monitor POV */
4260                 intel_dp_unset_edid(intel_dp);
4261                 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4262                         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4263                 return connector_status_disconnected;
4264         }
4265
4266         /* If full detect is not performed yet, do a full detect */
4267         if (!intel_dp->detect_done)
4268                 intel_dp_long_pulse(intel_dp->attached_connector);
4269
4270         intel_dp->detect_done = false;
4271
4272         if (intel_connector->detect_edid)
4273                 return connector_status_connected;
4274         else
4275                 return connector_status_disconnected;
4276 }
4277
4278 static void
4279 intel_dp_force(struct drm_connector *connector)
4280 {
4281         struct intel_dp *intel_dp = intel_attached_dp(connector);
4282         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4283         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4284         enum intel_display_power_domain power_domain;
4285
4286         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4287                       connector->base.id, connector->name);
4288         intel_dp_unset_edid(intel_dp);
4289
4290         if (connector->status != connector_status_connected)
4291                 return;
4292
4293         power_domain = intel_display_port_aux_power_domain(intel_encoder);
4294         intel_display_power_get(dev_priv, power_domain);
4295
4296         intel_dp_set_edid(intel_dp);
4297
4298         intel_display_power_put(dev_priv, power_domain);
4299
4300         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4301                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4302 }
4303
4304 static int intel_dp_get_modes(struct drm_connector *connector)
4305 {
4306         struct intel_connector *intel_connector = to_intel_connector(connector);
4307         struct edid *edid;
4308
4309         edid = intel_connector->detect_edid;
4310         if (edid) {
4311                 int ret = intel_connector_update_modes(connector, edid);
4312                 if (ret)
4313                         return ret;
4314         }
4315
4316         /* if eDP has no EDID, fall back to fixed mode */
4317         if (is_edp(intel_attached_dp(connector)) &&
4318             intel_connector->panel.fixed_mode) {
4319                 struct drm_display_mode *mode;
4320
4321                 mode = drm_mode_duplicate(connector->dev,
4322                                           intel_connector->panel.fixed_mode);
4323                 if (mode) {
4324                         drm_mode_probed_add(connector, mode);
4325                         return 1;
4326                 }
4327         }
4328
4329         return 0;
4330 }
4331
4332 static bool
4333 intel_dp_detect_audio(struct drm_connector *connector)
4334 {
4335         bool has_audio = false;
4336         struct edid *edid;
4337
4338         edid = to_intel_connector(connector)->detect_edid;
4339         if (edid)
4340                 has_audio = drm_detect_monitor_audio(edid);
4341
4342         return has_audio;
4343 }
4344
/*
 * drm_connector_funcs.set_property hook. Handles the force-audio and
 * broadcast-RGB properties, plus the scaling-mode property on eDP.
 * A successful change that affects output triggers a modeset restore
 * via the "done" path; unchanged values return 0 without one.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the property object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-derives audio capability from the EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: skip the modeset restore. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}
		if (HAS_GMCH_DISPLAY(dev_priv) &&
		    val == DRM_MODE_SCALE_CENTER) {
			DRM_DEBUG_KMS("centering not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new setting by restoring the mode on the active crtc. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4437
/* Connector destructor: free cached EDIDs, eDP panel state and the
 * connector itself. */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* The cached EDID may be an ERR_PTR sentinel; only free real ones. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4456
/* Encoder destructor: tear down MST, force eDP VDD off, unregister the
 * reboot notifier, and free the digital port. */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4481
/* Suspend hook: make sure eDP VDD is really off before going down. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4498
/*
 * Reconcile software state with VDD left enabled by the BIOS: take the
 * matching power-domain reference and schedule the deferred VDD off.
 * Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4523
/*
 * drm_encoder_funcs.reset hook (eDP only): re-read the power sequencer
 * assignment the BIOS may have changed and sanitize VDD state tracking.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4546
/* Connector ops shared by DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4558
/* Probe helpers for DP/eDP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4564
/* Encoder ops for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4569
/*
 * HPD IRQ handler for a digital port. A long pulse triggers full
 * re-detection; a short pulse services MST or sink IRQs, falling back to
 * a full detect when the short-pulse handling fails.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* AUX access during detection needs the port's AUX power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		intel_dp_long_pulse(intel_dp->attached_connector);
		if (intel_dp->is_mst)
			ret = IRQ_HANDLED;
		goto put_power;

	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
				/*
				 * If we were in MST mode, and device is not
				 * there, get out of MST mode
				 */
				DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
					      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
				intel_dp->is_mst = false;
				drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
								intel_dp->is_mst);
				goto put_power;
			}
		}

		if (!intel_dp->is_mst) {
			/* Short-pulse handling failed: do a full detect. */
			if (!intel_dp_short_pulse(intel_dp)) {
				intel_dp_long_pulse(intel_dp->attached_connector);
				goto put_power;
			}
		}
	}

	ret = IRQ_HANDLED;

put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4643
4644 /* check the VBT to see whether the eDP is on another port */
4645 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4646 {
4647         struct drm_i915_private *dev_priv = dev->dev_private;
4648
4649         /*
4650          * eDP not supported on g4x. so bail out early just
4651          * for a bit extra safety in case the VBT is bonkers.
4652          */
4653         if (INTEL_INFO(dev)->gen < 5)
4654                 return false;
4655
4656         if (port == PORT_A)
4657                 return true;
4658
4659         return intel_bios_is_port_edp(dev_priv, port);
4660 }
4661
4662 void
4663 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4664 {
4665         struct intel_connector *intel_connector = to_intel_connector(connector);
4666
4667         intel_attach_force_audio_property(connector);
4668         intel_attach_broadcast_rgb_property(connector);
4669         intel_dp->color_range_auto = true;
4670
4671         if (is_edp(intel_dp)) {
4672                 drm_mode_create_scaling_mode_property(connector->dev);
4673                 drm_object_attach_property(
4674                         &connector->base,
4675                         connector->dev->mode_config.scaling_mode_property,
4676                         DRM_MODE_SCALE_ASPECT);
4677                 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4678         }
4679 }
4680
4681 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4682 {
4683         intel_dp->panel_power_off_time = ktime_get_boottime();
4684         intel_dp->last_power_on = jiffies;
4685         intel_dp->last_backlight_off = jiffies;
4686 }
4687
4688 static void
4689 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4690                                     struct intel_dp *intel_dp)
4691 {
4692         struct drm_i915_private *dev_priv = dev->dev_private;
4693         struct edp_power_seq cur, vbt, spec,
4694                 *final = &intel_dp->pps_delays;
4695         u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
4696         i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4697
4698         lockdep_assert_held(&dev_priv->pps_mutex);
4699
4700         /* already initialized? */
4701         if (final->t11_t12 != 0)
4702                 return;
4703
4704         if (IS_BROXTON(dev)) {
4705                 /*
4706                  * TODO: BXT has 2 sets of PPS registers.
4707                  * Correct Register for Broxton need to be identified
4708                  * using VBT. hardcoding for now
4709                  */
4710                 pp_ctrl_reg = BXT_PP_CONTROL(0);
4711                 pp_on_reg = BXT_PP_ON_DELAYS(0);
4712                 pp_off_reg = BXT_PP_OFF_DELAYS(0);
4713         } else if (HAS_PCH_SPLIT(dev)) {
4714                 pp_ctrl_reg = PCH_PP_CONTROL;
4715                 pp_on_reg = PCH_PP_ON_DELAYS;
4716                 pp_off_reg = PCH_PP_OFF_DELAYS;
4717                 pp_div_reg = PCH_PP_DIVISOR;
4718         } else {
4719                 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4720
4721                 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4722                 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4723                 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4724                 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4725         }
4726
4727         /* Workaround: Need to write PP_CONTROL with the unlock key as
4728          * the very first thing. */
4729         pp_ctl = ironlake_get_pp_control(intel_dp);
4730
4731         pp_on = I915_READ(pp_on_reg);
4732         pp_off = I915_READ(pp_off_reg);
4733         if (!IS_BROXTON(dev)) {
4734                 I915_WRITE(pp_ctrl_reg, pp_ctl);
4735                 pp_div = I915_READ(pp_div_reg);
4736         }
4737
4738         /* Pull timing values out of registers */
4739         cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4740                 PANEL_POWER_UP_DELAY_SHIFT;
4741
4742         cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4743                 PANEL_LIGHT_ON_DELAY_SHIFT;
4744
4745         cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4746                 PANEL_LIGHT_OFF_DELAY_SHIFT;
4747
4748         cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4749                 PANEL_POWER_DOWN_DELAY_SHIFT;
4750
4751         if (IS_BROXTON(dev)) {
4752                 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
4753                         BXT_POWER_CYCLE_DELAY_SHIFT;
4754                 if (tmp > 0)
4755                         cur.t11_t12 = (tmp - 1) * 1000;
4756                 else
4757                         cur.t11_t12 = 0;
4758         } else {
4759                 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4760                        PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4761         }
4762
4763         DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4764                       cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4765
4766         vbt = dev_priv->vbt.edp.pps;
4767
4768         /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4769          * our hw here, which are all in 100usec. */
4770         spec.t1_t3 = 210 * 10;
4771         spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4772         spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4773         spec.t10 = 500 * 10;
4774         /* This one is special and actually in units of 100ms, but zero
4775          * based in the hw (so we need to add 100 ms). But the sw vbt
4776          * table multiplies it with 1000 to make it in units of 100usec,
4777          * too. */
4778         spec.t11_t12 = (510 + 100) * 10;
4779
4780         DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4781                       vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4782
4783         /* Use the max of the register settings and vbt. If both are
4784          * unset, fall back to the spec limits. */
4785 #define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
4786                                        spec.field : \
4787                                        max(cur.field, vbt.field))
4788         assign_final(t1_t3);
4789         assign_final(t8);
4790         assign_final(t9);
4791         assign_final(t10);
4792         assign_final(t11_t12);
4793 #undef assign_final
4794
4795 #define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
4796         intel_dp->panel_power_up_delay = get_delay(t1_t3);
4797         intel_dp->backlight_on_delay = get_delay(t8);
4798         intel_dp->backlight_off_delay = get_delay(t9);
4799         intel_dp->panel_power_down_delay = get_delay(t10);
4800         intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4801 #undef get_delay
4802
4803         DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4804                       intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4805                       intel_dp->panel_power_cycle_delay);
4806
4807         DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4808                       intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
4809 }
4810
/*
 * Write the panel power sequencer (PPS) delay/divisor registers from the
 * delays previously computed into intel_dp->pps_delays.
 *
 * Must be called with dev_priv->pps_mutex held (asserted below).
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* Reference divisor input, derived from the raw clock frequency. */
	int div = dev_priv->rawclk_freq / 1000;
	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/*
	 * Pick the per-platform PPS register instances. Note that on BXT
	 * there is no separate divisor register (pp_div_reg stays unset and
	 * is never used on that path below).
	 */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		/* VLV/CHV: PPS registers are per-pipe. */
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/*
		 * On BXT the power cycle delay field lives in the PP_CONTROL
		 * register itself, so read-modify-write just that field.
		 */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		/* t11_t12 is stored in 100usec units; register wants 100ms. */
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	/* Read back what actually landed in the registers for debugging. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
4898
4899 /**
4900  * intel_dp_set_drrs_state - program registers for RR switch to take effect
4901  * @dev: DRM device
4902  * @refresh_rate: RR to be programmed
4903  *
4904  * This function gets called when refresh rate (RR) has to be changed from
4905  * one frequency to another. Switches can be between high and low RR
4906  * supported by the panel or to any other RR based on media playback (in
4907  * this case, RR value needs to be passed from user space).
4908  *
4909  * The caller of this function needs to take a lock on dev_priv->drrs.
4910  */
4911 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4912 {
4913         struct drm_i915_private *dev_priv = dev->dev_private;
4914         struct intel_encoder *encoder;
4915         struct intel_digital_port *dig_port = NULL;
4916         struct intel_dp *intel_dp = dev_priv->drrs.dp;
4917         struct intel_crtc_state *config = NULL;
4918         struct intel_crtc *intel_crtc = NULL;
4919         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
4920
4921         if (refresh_rate <= 0) {
4922                 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4923                 return;
4924         }
4925
4926         if (intel_dp == NULL) {
4927                 DRM_DEBUG_KMS("DRRS not supported.\n");
4928                 return;
4929         }
4930
4931         /*
4932          * FIXME: This needs proper synchronization with psr state for some
4933          * platforms that cannot have PSR and DRRS enabled at the same time.
4934          */
4935
4936         dig_port = dp_to_dig_port(intel_dp);
4937         encoder = &dig_port->base;
4938         intel_crtc = to_intel_crtc(encoder->base.crtc);
4939
4940         if (!intel_crtc) {
4941                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4942                 return;
4943         }
4944
4945         config = intel_crtc->config;
4946
4947         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
4948                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4949                 return;
4950         }
4951
4952         if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4953                         refresh_rate)
4954                 index = DRRS_LOW_RR;
4955
4956         if (index == dev_priv->drrs.refresh_rate_type) {
4957                 DRM_DEBUG_KMS(
4958                         "DRRS requested for previously set RR...ignoring\n");
4959                 return;
4960         }
4961
4962         if (!intel_crtc->active) {
4963                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4964                 return;
4965         }
4966
4967         if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
4968                 switch (index) {
4969                 case DRRS_HIGH_RR:
4970                         intel_dp_set_m_n(intel_crtc, M1_N1);
4971                         break;
4972                 case DRRS_LOW_RR:
4973                         intel_dp_set_m_n(intel_crtc, M2_N2);
4974                         break;
4975                 case DRRS_MAX_RR:
4976                 default:
4977                         DRM_ERROR("Unsupported refreshrate type\n");
4978                 }
4979         } else if (INTEL_INFO(dev)->gen > 6) {
4980                 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
4981                 u32 val;
4982
4983                 val = I915_READ(reg);
4984                 if (index > DRRS_HIGH_RR) {
4985                         if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4986                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4987                         else
4988                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
4989                 } else {
4990                         if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4991                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4992                         else
4993                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4994                 }
4995                 I915_WRITE(reg, val);
4996         }
4997
4998         dev_priv->drrs.refresh_rate_type = index;
4999
5000         DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5001 }
5002
5003 /**
5004  * intel_edp_drrs_enable - init drrs struct if supported
5005  * @intel_dp: DP struct
5006  *
5007  * Initializes frontbuffer_bits and drrs.dp
5008  */
5009 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5010 {
5011         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5012         struct drm_i915_private *dev_priv = dev->dev_private;
5013         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5014         struct drm_crtc *crtc = dig_port->base.base.crtc;
5015         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5016
5017         if (!intel_crtc->config->has_drrs) {
5018                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5019                 return;
5020         }
5021
5022         mutex_lock(&dev_priv->drrs.mutex);
5023         if (WARN_ON(dev_priv->drrs.dp)) {
5024                 DRM_ERROR("DRRS already enabled\n");
5025                 goto unlock;
5026         }
5027
5028         dev_priv->drrs.busy_frontbuffer_bits = 0;
5029
5030         dev_priv->drrs.dp = intel_dp;
5031
5032 unlock:
5033         mutex_unlock(&dev_priv->drrs.mutex);
5034 }
5035
5036 /**
5037  * intel_edp_drrs_disable - Disable DRRS
5038  * @intel_dp: DP struct
5039  *
5040  */
5041 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5042 {
5043         struct drm_device *dev = intel_dp_to_dev(intel_dp);
5044         struct drm_i915_private *dev_priv = dev->dev_private;
5045         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5046         struct drm_crtc *crtc = dig_port->base.base.crtc;
5047         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5048
5049         if (!intel_crtc->config->has_drrs)
5050                 return;
5051
5052         mutex_lock(&dev_priv->drrs.mutex);
5053         if (!dev_priv->drrs.dp) {
5054                 mutex_unlock(&dev_priv->drrs.mutex);
5055                 return;
5056         }
5057
5058         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5059                 intel_dp_set_drrs_state(dev_priv->dev,
5060                         intel_dp->attached_connector->panel.
5061                         fixed_mode->vrefresh);
5062
5063         dev_priv->drrs.dp = NULL;
5064         mutex_unlock(&dev_priv->drrs.mutex);
5065
5066         cancel_delayed_work_sync(&dev_priv->drrs.work);
5067 }
5068
5069 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5070 {
5071         struct drm_i915_private *dev_priv =
5072                 container_of(work, typeof(*dev_priv), drrs.work.work);
5073         struct intel_dp *intel_dp;
5074
5075         mutex_lock(&dev_priv->drrs.mutex);
5076
5077         intel_dp = dev_priv->drrs.dp;
5078
5079         if (!intel_dp)
5080                 goto unlock;
5081
5082         /*
5083          * The delayed work can race with an invalidate hence we need to
5084          * recheck.
5085          */
5086
5087         if (dev_priv->drrs.busy_frontbuffer_bits)
5088                 goto unlock;
5089
5090         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5091                 intel_dp_set_drrs_state(dev_priv->dev,
5092                         intel_dp->attached_connector->panel.
5093                         downclock_mode->vrefresh);
5094
5095 unlock:
5096         mutex_unlock(&dev_priv->drrs.mutex);
5097 }
5098
5099 /**
5100  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5101  * @dev: DRM device
5102  * @frontbuffer_bits: frontbuffer plane tracking bits
5103  *
 * This function gets called every time rendering on the given planes starts.
5105  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5106  *
5107  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5108  */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/*
	 * Non-sync cancel taken before the mutex; a racing worker re-checks
	 * all state under drrs.mutex, so a missed cancel is harmless.
	 */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS not enabled on any connector right now. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only frontbuffer bits belonging to the DRRS pipe are relevant. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5141
5142 /**
5143  * intel_edp_drrs_flush - Restart Idleness DRRS
5144  * @dev: DRM device
5145  * @frontbuffer_bits: frontbuffer plane tracking bits
5146  *
5147  * This function gets called every time rendering on the given planes has
5148  * completed or flip on a crtc is completed. So DRRS should be upclocked
5149  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5150  * if no other planes are dirty.
5151  *
5152  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5153  */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/*
	 * Non-sync cancel taken before the mutex; a racing worker re-checks
	 * all state under drrs.mutex, so a missed cancel is harmless.
	 */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS not enabled on any connector right now. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only frontbuffer bits belonging to the DRRS pipe are relevant. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means the screen was just updated, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5193
5194 /**
5195  * DOC: Display Refresh Rate Switching (DRRS)
5196  *
5197  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5199  * dynamically, based on the usage scenario. This feature is applicable
5200  * for internal panels.
5201  *
5202  * Indication that the panel supports DRRS is given by the panel EDID, which
5203  * would list multiple refresh rates for one resolution.
5204  *
5205  * DRRS is of 2 types - static and seamless.
5206  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5207  * (may appear as a blink on screen) and is used in dock-undock scenario.
5208  * Seamless DRRS involves changing RR without any visual effect to the user
5209  * and can be used during normal system usage. This is done by programming
5210  * certain registers.
5211  *
5212  * Support for static/seamless DRRS may be indicated in the VBT based on
5213  * inputs from the panel spec.
5214  *
5215  * DRRS saves power by switching to low RR based on usage scenarios.
5216  *
5217  * The implementation is based on frontbuffer tracking implementation.  When
5218  * there is a disturbance on the screen triggered by user activity or a periodic
5219  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
5220  * no movement on screen, after a timeout of 1 second, a switch to low RR is
5221  * made.
5222  *
5223  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5224  * and intel_edp_drrs_flush() are called.
5225  *
5226  * DRRS can be further extended to support other internal panels and also
5227  * the scenario of video playback wherein RR is set based on the rate
5228  * requested by userspace.
5229  */
5230
5231 /**
5232  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5233  * @intel_connector: eDP connector
5234  * @fixed_mode: preferred mode of panel
5235  *
 * This function is called only once at driver load to initialize basic
5237  * DRRS stuff.
5238  *
5239  * Returns:
5240  * Downclock mode if panel supports it, else return NULL.
5241  * DRRS support is determined by the presence of downclock mode (apart
5242  * from VBT setting).
5243  */
5244 static struct drm_display_mode *
5245 intel_dp_drrs_init(struct intel_connector *intel_connector,
5246                 struct drm_display_mode *fixed_mode)
5247 {
5248         struct drm_connector *connector = &intel_connector->base;
5249         struct drm_device *dev = connector->dev;
5250         struct drm_i915_private *dev_priv = dev->dev_private;
5251         struct drm_display_mode *downclock_mode = NULL;
5252
5253         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5254         mutex_init(&dev_priv->drrs.mutex);
5255
5256         if (INTEL_INFO(dev)->gen <= 6) {
5257                 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5258                 return NULL;
5259         }
5260
5261         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5262                 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5263                 return NULL;
5264         }
5265
5266         downclock_mode = intel_find_panel_downclock
5267                                         (dev, fixed_mode, connector);
5268
5269         if (!downclock_mode) {
5270                 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5271                 return NULL;
5272         }
5273
5274         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5275
5276         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5277         DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5278         return downclock_mode;
5279 }
5280
/*
 * Initialize the eDP-specific parts of a connector: sanitize VDD state,
 * cache DPCD and EDID, program the panel power sequencer registers, pick
 * the fixed and (optional) downclock modes, and set up the backlight.
 *
 * Returns true on success (or for non-eDP connectors, which need none of
 * this), false if the DPCD read fails and the panel is presumed a ghost.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID present but yielded no modes: treat as bad. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Note: may store an ERR_PTR, not just NULL or a valid edid. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			/* DRRS needs the fixed mode to find a downclock mode */
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5387
5388 bool
5389 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5390                         struct intel_connector *intel_connector)
5391 {
5392         struct drm_connector *connector = &intel_connector->base;
5393         struct intel_dp *intel_dp = &intel_dig_port->dp;
5394         struct intel_encoder *intel_encoder = &intel_dig_port->base;
5395         struct drm_device *dev = intel_encoder->base.dev;
5396         struct drm_i915_private *dev_priv = dev->dev_private;
5397         enum port port = intel_dig_port->port;
5398         int type, ret;
5399
5400         if (WARN(intel_dig_port->max_lanes < 1,
5401                  "Not enough lanes (%d) for DP on port %c\n",
5402                  intel_dig_port->max_lanes, port_name(port)))
5403                 return false;
5404
5405         intel_dp->pps_pipe = INVALID_PIPE;
5406
5407         /* intel_dp vfuncs */
5408         if (INTEL_INFO(dev)->gen >= 9)
5409                 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5410         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5411                 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5412         else if (HAS_PCH_SPLIT(dev))
5413                 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5414         else
5415                 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
5416
5417         if (INTEL_INFO(dev)->gen >= 9)
5418                 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5419         else
5420                 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
5421
5422         if (HAS_DDI(dev))
5423                 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5424
5425         /* Preserve the current hw state. */
5426         intel_dp->DP = I915_READ(intel_dp->output_reg);
5427         intel_dp->attached_connector = intel_connector;
5428
5429         if (intel_dp_is_edp(dev, port))
5430                 type = DRM_MODE_CONNECTOR_eDP;
5431         else
5432                 type = DRM_MODE_CONNECTOR_DisplayPort;
5433
5434         /*
5435          * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5436          * for DP the encoder type can be set by the caller to
5437          * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5438          */
5439         if (type == DRM_MODE_CONNECTOR_eDP)
5440                 intel_encoder->type = INTEL_OUTPUT_EDP;
5441
5442         /* eDP only on port B and/or C on vlv/chv */
5443         if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5444                     is_edp(intel_dp) && port != PORT_B && port != PORT_C))
5445                 return false;
5446
5447         DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5448                         type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5449                         port_name(port));
5450
5451         drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5452         drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5453
5454         connector->interlace_allowed = true;
5455         connector->doublescan_allowed = 0;
5456
5457         INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5458                           edp_panel_vdd_work);
5459
5460         intel_connector_attach_encoder(intel_connector, intel_encoder);
5461         drm_connector_register(connector);
5462
5463         if (HAS_DDI(dev))
5464                 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5465         else
5466                 intel_connector->get_hw_state = intel_connector_get_hw_state;
5467         intel_connector->unregister = intel_dp_connector_unregister;
5468
5469         /* Set up the hotplug pin. */
5470         switch (port) {
5471         case PORT_A:
5472                 intel_encoder->hpd_pin = HPD_PORT_A;
5473                 break;
5474         case PORT_B:
5475                 intel_encoder->hpd_pin = HPD_PORT_B;
5476                 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
5477                         intel_encoder->hpd_pin = HPD_PORT_A;
5478                 break;
5479         case PORT_C:
5480                 intel_encoder->hpd_pin = HPD_PORT_C;
5481                 break;
5482         case PORT_D:
5483                 intel_encoder->hpd_pin = HPD_PORT_D;
5484                 break;
5485         case PORT_E:
5486                 intel_encoder->hpd_pin = HPD_PORT_E;
5487                 break;
5488         default:
5489                 BUG();
5490         }
5491
5492         if (is_edp(intel_dp)) {
5493                 pps_lock(intel_dp);
5494                 intel_dp_init_panel_power_timestamps(intel_dp);
5495                 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5496                         vlv_initial_power_sequencer_setup(intel_dp);
5497                 else
5498                         intel_dp_init_panel_power_sequencer(dev, intel_dp);
5499                 pps_unlock(intel_dp);
5500         }
5501
5502         ret = intel_dp_aux_init(intel_dp, intel_connector);
5503         if (ret)
5504                 goto fail;
5505
5506         /* init MST on ports that can support it */
5507         if (HAS_DP_MST(dev) &&
5508             (port == PORT_B || port == PORT_C || port == PORT_D))
5509                 intel_dp_mst_encoder_init(intel_dig_port,
5510                                           intel_connector->base.base.id);
5511
5512         if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5513                 intel_dp_aux_fini(intel_dp);
5514                 intel_dp_mst_encoder_cleanup(intel_dig_port);
5515                 goto fail;
5516         }
5517
5518         intel_dp_add_properties(intel_dp, connector);
5519
5520         /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5521          * 0xd.  Failure to do so will result in spurious interrupts being
5522          * generated on the port when a cable is not attached.
5523          */
5524         if (IS_G4X(dev) && !IS_GM45(dev)) {
5525                 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5526                 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5527         }
5528
5529         i915_debugfs_connector_add(connector);
5530
5531         return true;
5532
5533 fail:
5534         if (is_edp(intel_dp)) {
5535                 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5536                 /*
5537                  * vdd might still be enabled do to the delayed vdd off.
5538                  * Make sure vdd is actually turned off here.
5539                  */
5540                 pps_lock(intel_dp);
5541                 edp_panel_vdd_off_sync(intel_dp);
5542                 pps_unlock(intel_dp);
5543         }
5544         drm_connector_unregister(connector);
5545         drm_connector_cleanup(connector);
5546
5547         return false;
5548 }
5549
5550 void
5551 intel_dp_init(struct drm_device *dev,
5552               i915_reg_t output_reg, enum port port)
5553 {
5554         struct drm_i915_private *dev_priv = dev->dev_private;
5555         struct intel_digital_port *intel_dig_port;
5556         struct intel_encoder *intel_encoder;
5557         struct drm_encoder *encoder;
5558         struct intel_connector *intel_connector;
5559
5560         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5561         if (!intel_dig_port)
5562                 return;
5563
5564         intel_connector = intel_connector_alloc();
5565         if (!intel_connector)
5566                 goto err_connector_alloc;
5567
5568         intel_encoder = &intel_dig_port->base;
5569         encoder = &intel_encoder->base;
5570
5571         if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5572                              DRM_MODE_ENCODER_TMDS, NULL))
5573                 goto err_encoder_init;
5574
5575         intel_encoder->compute_config = intel_dp_compute_config;
5576         intel_encoder->disable = intel_disable_dp;
5577         intel_encoder->get_hw_state = intel_dp_get_hw_state;
5578         intel_encoder->get_config = intel_dp_get_config;
5579         intel_encoder->suspend = intel_dp_encoder_suspend;
5580         if (IS_CHERRYVIEW(dev)) {
5581                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5582                 intel_encoder->pre_enable = chv_pre_enable_dp;
5583                 intel_encoder->enable = vlv_enable_dp;
5584                 intel_encoder->post_disable = chv_post_disable_dp;
5585                 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
5586         } else if (IS_VALLEYVIEW(dev)) {
5587                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5588                 intel_encoder->pre_enable = vlv_pre_enable_dp;
5589                 intel_encoder->enable = vlv_enable_dp;
5590                 intel_encoder->post_disable = vlv_post_disable_dp;
5591         } else {
5592                 intel_encoder->pre_enable = g4x_pre_enable_dp;
5593                 intel_encoder->enable = g4x_enable_dp;
5594                 if (INTEL_INFO(dev)->gen >= 5)
5595                         intel_encoder->post_disable = ilk_post_disable_dp;
5596         }
5597
5598         intel_dig_port->port = port;
5599         intel_dig_port->dp.output_reg = output_reg;
5600         intel_dig_port->max_lanes = 4;
5601
5602         intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5603         if (IS_CHERRYVIEW(dev)) {
5604                 if (port == PORT_D)
5605                         intel_encoder->crtc_mask = 1 << 2;
5606                 else
5607                         intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5608         } else {
5609                 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5610         }
5611         intel_encoder->cloneable = 0;
5612
5613         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5614         dev_priv->hotplug.irq_port[port] = intel_dig_port;
5615
5616         if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5617                 goto err_init_connector;
5618
5619         return;
5620
5621 err_init_connector:
5622         drm_encoder_cleanup(encoder);
5623 err_encoder_init:
5624         kfree(intel_connector);
5625 err_connector_alloc:
5626         kfree(intel_dig_port);
5627
5628         return;
5629 }
5630
5631 void intel_dp_mst_suspend(struct drm_device *dev)
5632 {
5633         struct drm_i915_private *dev_priv = dev->dev_private;
5634         int i;
5635
5636         /* disable MST */
5637         for (i = 0; i < I915_MAX_PORTS; i++) {
5638                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5639                 if (!intel_dig_port)
5640                         continue;
5641
5642                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5643                         if (!intel_dig_port->dp.can_mst)
5644                                 continue;
5645                         if (intel_dig_port->dp.is_mst)
5646                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5647                 }
5648         }
5649 }
5650
5651 void intel_dp_mst_resume(struct drm_device *dev)
5652 {
5653         struct drm_i915_private *dev_priv = dev->dev_private;
5654         int i;
5655
5656         for (i = 0; i < I915_MAX_PORTS; i++) {
5657                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5658                 if (!intel_dig_port)
5659                         continue;
5660                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5661                         int ret;
5662
5663                         if (!intel_dig_port->dp.can_mst)
5664                                 continue;
5665
5666                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5667                         if (ret != 0) {
5668                                 intel_dp_check_mst_status(&intel_dig_port->dp);
5669                         }
5670                 }
5671         }
5672 }