019283f439513cb0f1765aab72ba15b028d81b30
[cascardo/linux.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
/* Interval between DP link status checks; 10 * 1000 -- presumably ms
 * (10 seconds), confirm against the users of this define. */
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/*
 * Pairing of a DP link clock with the DPLL divider settings that
 * produce it.
 */
struct dp_link_dpll {
	int clock;		/* link clock in 10 kHz units (162000 == 1.62 GHz) */
	struct dpll dpll;	/* divider values to program into the DPLL */
};
54
/* DPLL dividers per link rate for gen4 DP ports. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
61
/* DPLL dividers per link rate for PCH-attached DP ports. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
68
/* DPLL dividers per link rate for Valleyview DP ports. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
75
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only the fixed rates are provided; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
93
/* Per-platform supported link rates, in 10 kHz units (270000 == 2.7 GHz). */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
99
100 /**
101  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102  * @intel_dp: DP struct
103  *
104  * If a CPU or PCH DP output is attached to an eDP panel, this function
105  * will return true, and false otherwise.
106  */
107 static bool is_edp(struct intel_dp *intel_dp)
108 {
109         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112 }
113
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
115 {
116         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118         return intel_dig_port->base.base.dev;
119 }
120
121 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122 {
123         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
124 }
125
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
131                                       enum pipe pipe);
132
/*
 * Return a 4-bit mask of the lanes NOT used by a configuration with
 * @lane_count active lanes (lane 0 in bit 0).
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used_lanes = (1 << lane_count) - 1;

	return ~used_lanes & 0xf;
}
137
138 static int
139 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
140 {
141         int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
142
143         switch (max_link_bw) {
144         case DP_LINK_BW_1_62:
145         case DP_LINK_BW_2_7:
146         case DP_LINK_BW_5_4:
147                 break;
148         default:
149                 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
150                      max_link_bw);
151                 max_link_bw = DP_LINK_BW_1_62;
152                 break;
153         }
154         return max_link_bw;
155 }
156
157 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
158 {
159         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
160         struct drm_device *dev = intel_dig_port->base.base.dev;
161         u8 source_max, sink_max;
162
163         source_max = 4;
164         if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
165             (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
166                 source_max = 2;
167
168         sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
169
170         return min(source_max, sink_max);
171 }
172
173 /*
174  * The units on the numbers in the next two are... bizarre.  Examples will
175  * make it clearer; this one parallels an example in the eDP spec.
176  *
177  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
178  *
179  *     270000 * 1 * 8 / 10 == 216000
180  *
181  * The actual data capacity of that configuration is 2.16Gbit/s, so the
182  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
183  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
184  * 119000.  At 18bpp that's 2142000 kilobits per second.
185  *
186  * Thus the strange-looking division by 10 in intel_dp_link_required, to
187  * get the result in decakilobits instead of kilobits.
188  */
189
/*
 * Bandwidth needed by a mode, in decakilobits per second (see the
 * units discussion in the comment block preceding this function).
 * Rounds up so marginal modes are not falsely accepted.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	const int kilobits = pixel_clock * bpp;

	/* Ceiling division by 10 converts kilobits to decakilobits. */
	return (kilobits + 9) / 10;
}
195
/*
 * Maximum data rate of a link, in decakilobits per second.
 * The 8/10 factor accounts for 8b/10b channel coding overhead.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw_rate = max_link_clock * max_lanes;

	return raw_rate * 8 / 10;
}
201
202 static enum drm_mode_status
203 intel_dp_mode_valid(struct drm_connector *connector,
204                     struct drm_display_mode *mode)
205 {
206         struct intel_dp *intel_dp = intel_attached_dp(connector);
207         struct intel_connector *intel_connector = to_intel_connector(connector);
208         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
209         int target_clock = mode->clock;
210         int max_rate, mode_rate, max_lanes, max_link_clock;
211
212         if (is_edp(intel_dp) && fixed_mode) {
213                 if (mode->hdisplay > fixed_mode->hdisplay)
214                         return MODE_PANEL;
215
216                 if (mode->vdisplay > fixed_mode->vdisplay)
217                         return MODE_PANEL;
218
219                 target_clock = fixed_mode->clock;
220         }
221
222         max_link_clock = intel_dp_max_link_rate(intel_dp);
223         max_lanes = intel_dp_max_lane_count(intel_dp);
224
225         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
226         mode_rate = intel_dp_link_required(target_clock, 18);
227
228         if (mode_rate > max_rate)
229                 return MODE_CLOCK_HIGH;
230
231         if (mode->clock < 10000)
232                 return MODE_CLOCK_LOW;
233
234         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
235                 return MODE_H_ILLEGAL;
236
237         return MODE_OK;
238 }
239
/*
 * Pack up to 4 bytes into a 32-bit AUX data register value,
 * first byte in the most significant position. Extra bytes beyond
 * four are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t packed = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		packed |= (uint32_t)src[i] << (24 - i * 8);

	return packed;
}
251
/*
 * Unpack a 32-bit AUX data register value into up to 4 bytes,
 * most significant byte first (inverse of intel_dp_pack_aux()).
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
260
261 static void
262 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
263                                     struct intel_dp *intel_dp);
264 static void
265 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
266                                               struct intel_dp *intel_dp);
267
/*
 * Acquire the panel power sequencer mutex, holding a reference on the
 * port's power domain for the duration.
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here: the power domain get/put must
	 * happen while NOT holding pps_mutex, so it is taken first.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
285
/*
 * Release the panel power sequencer mutex and drop the power domain
 * reference taken in pps_lock(), in the opposite order.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
299
300 static void
301 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
302 {
303         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
304         struct drm_device *dev = intel_dig_port->base.base.dev;
305         struct drm_i915_private *dev_priv = dev->dev_private;
306         enum pipe pipe = intel_dp->pps_pipe;
307         bool pll_enabled, release_cl_override = false;
308         enum dpio_phy phy = DPIO_PHY(pipe);
309         enum dpio_channel ch = vlv_pipe_to_channel(pipe);
310         uint32_t DP;
311
312         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
313                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
314                  pipe_name(pipe), port_name(intel_dig_port->port)))
315                 return;
316
317         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
318                       pipe_name(pipe), port_name(intel_dig_port->port));
319
320         /* Preserve the BIOS-computed detected bit. This is
321          * supposed to be read-only.
322          */
323         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
324         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
325         DP |= DP_PORT_WIDTH(1);
326         DP |= DP_LINK_TRAIN_PAT_1;
327
328         if (IS_CHERRYVIEW(dev))
329                 DP |= DP_PIPE_SELECT_CHV(pipe);
330         else if (pipe == PIPE_B)
331                 DP |= DP_PIPEB_SELECT;
332
333         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
334
335         /*
336          * The DPLL for the pipe must be enabled for this to work.
337          * So enable temporarily it if it's not already enabled.
338          */
339         if (!pll_enabled) {
340                 release_cl_override = IS_CHERRYVIEW(dev) &&
341                         !chv_phy_powergate_ch(dev_priv, phy, ch, true);
342
343                 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
344                                  &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
345         }
346
347         /*
348          * Similar magic as in intel_dp_enable_port().
349          * We _must_ do this port enable + disable trick
350          * to make this power seqeuencer lock onto the port.
351          * Otherwise even VDD force bit won't work.
352          */
353         I915_WRITE(intel_dp->output_reg, DP);
354         POSTING_READ(intel_dp->output_reg);
355
356         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
357         POSTING_READ(intel_dp->output_reg);
358
359         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
360         POSTING_READ(intel_dp->output_reg);
361
362         if (!pll_enabled) {
363                 vlv_force_pll_off(dev, pipe);
364
365                 if (release_cl_override)
366                         chv_phy_powergate_ch(dev_priv, phy, ch, false);
367         }
368 }
369
/*
 * Return the pipe whose power sequencer drives this eDP port, assigning
 * one (and stealing it from another port if necessary) on first use.
 * Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/* Already assigned? Keep using the same sequencer. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		/* Mask out pipes whose sequencer another eDP port owns. */
		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;	/* lowest free pipe */

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
433
434 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
435                                enum pipe pipe);
436
437 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
438                                enum pipe pipe)
439 {
440         return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
441 }
442
443 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
444                                 enum pipe pipe)
445 {
446         return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
447 }
448
/* Wildcard vlv_pipe_check: accept any pipe. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
454
455 static enum pipe
456 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
457                      enum port port,
458                      vlv_pipe_check pipe_check)
459 {
460         enum pipe pipe;
461
462         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
463                 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
464                         PANEL_PORT_SELECT_MASK;
465
466                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
467                         continue;
468
469                 if (!pipe_check(dev_priv, pipe))
470                         continue;
471
472                 return pipe;
473         }
474
475         return INVALID_PIPE;
476 }
477
478 static void
479 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
480 {
481         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
482         struct drm_device *dev = intel_dig_port->base.base.dev;
483         struct drm_i915_private *dev_priv = dev->dev_private;
484         enum port port = intel_dig_port->port;
485
486         lockdep_assert_held(&dev_priv->pps_mutex);
487
488         /* try to find a pipe with this port selected */
489         /* first pick one where the panel is on */
490         intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
491                                                   vlv_pipe_has_pp_on);
492         /* didn't find one? pick one where vdd is on */
493         if (intel_dp->pps_pipe == INVALID_PIPE)
494                 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
495                                                           vlv_pipe_has_vdd_on);
496         /* didn't find one? pick one with just the correct port */
497         if (intel_dp->pps_pipe == INVALID_PIPE)
498                 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
499                                                           vlv_pipe_any);
500
501         /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
502         if (intel_dp->pps_pipe == INVALID_PIPE) {
503                 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
504                               port_name(port));
505                 return;
506         }
507
508         DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
509                       port_name(port), pipe_name(intel_dp->pps_pipe));
510
511         intel_dp_init_panel_power_sequencer(dev, intel_dp);
512         intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
513 }
514
/*
 * Invalidate the cached power sequencer pipe of every eDP port, so the
 * next vlv_power_sequencer_pipe() call picks one afresh.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
543
544 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
545 {
546         struct drm_device *dev = intel_dp_to_dev(intel_dp);
547
548         if (IS_BROXTON(dev))
549                 return BXT_PP_CONTROL(0);
550         else if (HAS_PCH_SPLIT(dev))
551                 return PCH_PP_CONTROL;
552         else
553                 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
554 }
555
556 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
557 {
558         struct drm_device *dev = intel_dp_to_dev(intel_dp);
559
560         if (IS_BROXTON(dev))
561                 return BXT_PP_STATUS(0);
562         else if (HAS_PCH_SPLIT(dev))
563                 return PCH_PP_STATUS;
564         else
565                 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
566 }
567
/*
 * Reboot notifier handler to shut down panel power, guaranteeing the T12
 * panel power-cycle delay across the reboot. Only applicable when the
 * panel PM state is not otherwise tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Only eDP panels care, and only on an actual restart. */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		u32 pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
603
604 static bool edp_have_panel_power(struct intel_dp *intel_dp)
605 {
606         struct drm_device *dev = intel_dp_to_dev(intel_dp);
607         struct drm_i915_private *dev_priv = dev->dev_private;
608
609         lockdep_assert_held(&dev_priv->pps_mutex);
610
611         if (IS_VALLEYVIEW(dev) &&
612             intel_dp->pps_pipe == INVALID_PIPE)
613                 return false;
614
615         return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
616 }
617
618 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
619 {
620         struct drm_device *dev = intel_dp_to_dev(intel_dp);
621         struct drm_i915_private *dev_priv = dev->dev_private;
622
623         lockdep_assert_held(&dev_priv->pps_mutex);
624
625         if (IS_VALLEYVIEW(dev) &&
626             intel_dp->pps_pipe == INVALID_PIPE)
627                 return false;
628
629         return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
630 }
631
632 static void
633 intel_dp_check_edp(struct intel_dp *intel_dp)
634 {
635         struct drm_device *dev = intel_dp_to_dev(intel_dp);
636         struct drm_i915_private *dev_priv = dev->dev_private;
637
638         if (!is_edp(intel_dp))
639                 return;
640
641         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
642                 WARN(1, "eDP powered off while attempting aux channel communication.\n");
643                 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
644                               I915_READ(_pp_stat_reg(intel_dp)),
645                               I915_READ(_pp_ctrl_reg(intel_dp)));
646         }
647 }
648
/*
 * Wait for the in-flight AUX transaction to complete, either via the
 * AUX interrupt or by polling, and return the final value of the AUX
 * channel control register.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads the control register on every evaluation and, as a side
 * effect, leaves the latest value in 'status' for the return below. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
672
673 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
674 {
675         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
676         struct drm_device *dev = intel_dig_port->base.base.dev;
677
678         /*
679          * The clock divider is based off the hrawclk, and would like to run at
680          * 2MHz.  So, take the hrawclk value and divide by 2 and use that
681          */
682         return index ? 0 : intel_hrawclk(dev) / 2;
683 }
684
685 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
686 {
687         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
688         struct drm_device *dev = intel_dig_port->base.base.dev;
689         struct drm_i915_private *dev_priv = dev->dev_private;
690
691         if (index)
692                 return 0;
693
694         if (intel_dig_port->port == PORT_A) {
695                 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
696
697         } else {
698                 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
699         }
700 }
701
702 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
703 {
704         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
705         struct drm_device *dev = intel_dig_port->base.base.dev;
706         struct drm_i915_private *dev_priv = dev->dev_private;
707
708         if (intel_dig_port->port == PORT_A) {
709                 if (index)
710                         return 0;
711                 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
712         } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
713                 /* Workaround for non-ULT HSW */
714                 switch (index) {
715                 case 0: return 63;
716                 case 1: return 72;
717                 default: return 0;
718                 }
719         } else  {
720                 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
721         }
722 }
723
/* VLV uses a single fixed AUX clock divider of 100. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
728
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (hardware
	 * derives the clock from CDCLK automatically). Return a dummy
	 * non-zero value for the first index so the shared AUX code
	 * still performs one iteration.
	 */
	return index ? 0 : 1;
}
738
739 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
740                                       bool has_aux_irq,
741                                       int send_bytes,
742                                       uint32_t aux_clock_divider)
743 {
744         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
745         struct drm_device *dev = intel_dig_port->base.base.dev;
746         uint32_t precharge, timeout;
747
748         if (IS_GEN6(dev))
749                 precharge = 3;
750         else
751                 precharge = 5;
752
753         if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
754                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
755         else
756                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
757
758         return DP_AUX_CH_CTL_SEND_BUSY |
759                DP_AUX_CH_CTL_DONE |
760                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
761                DP_AUX_CH_CTL_TIME_OUT_ERROR |
762                timeout |
763                DP_AUX_CH_CTL_RECEIVE_ERROR |
764                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
765                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
766                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
767 }
768
769 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
770                                       bool has_aux_irq,
771                                       int send_bytes,
772                                       uint32_t unused)
773 {
774         return DP_AUX_CH_CTL_SEND_BUSY |
775                DP_AUX_CH_CTL_DONE |
776                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
777                DP_AUX_CH_CTL_TIME_OUT_ERROR |
778                DP_AUX_CH_CTL_TIME_OUT_1600us |
779                DP_AUX_CH_CTL_RECEIVE_ERROR |
780                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
781                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
782 }
783
/*
 * intel_dp_aux_ch - perform one raw AUX channel transaction
 * @intel_dp: port to transact on
 * @send: header + payload bytes to transmit
 * @send_bytes: number of valid bytes in @send (max 20)
 * @recv: buffer for the reply bytes
 * @recv_size: capacity of @recv (max 20)
 *
 * Returns the number of reply bytes unloaded into @recv, or a negative
 * errno: -EBUSY if the channel never went idle or never signalled DONE,
 * -E2BIG for oversized transfers, -EIO on a receive error, -ETIMEDOUT
 * when the sink did not answer (typically nothing is connected).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	/* The five 32-bit data registers immediately follow AUX_CH_CTL. */
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Rate-limit the WARN to one per distinct stuck status
		 * value, so a persistently wedged channel doesn't spam. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Walk the platform's AUX clock dividers until one completes the
	 * transfer; get_aux_clock_divider() returns 0 past the last one. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
938
939 #define BARE_ADDRESS_SIZE       3
940 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
941 static ssize_t
942 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
943 {
944         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
945         uint8_t txbuf[20], rxbuf[20];
946         size_t txsize, rxsize;
947         int ret;
948
949         txbuf[0] = (msg->request << 4) |
950                 ((msg->address >> 16) & 0xf);
951         txbuf[1] = (msg->address >> 8) & 0xff;
952         txbuf[2] = msg->address & 0xff;
953         txbuf[3] = msg->size - 1;
954
955         switch (msg->request & ~DP_AUX_I2C_MOT) {
956         case DP_AUX_NATIVE_WRITE:
957         case DP_AUX_I2C_WRITE:
958         case DP_AUX_I2C_WRITE_STATUS_UPDATE:
959                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
960                 rxsize = 2; /* 0 or 1 data bytes */
961
962                 if (WARN_ON(txsize > 20))
963                         return -E2BIG;
964
965                 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
966
967                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
968                 if (ret > 0) {
969                         msg->reply = rxbuf[0] >> 4;
970
971                         if (ret > 1) {
972                                 /* Number of bytes written in a short write. */
973                                 ret = clamp_t(int, rxbuf[1], 0, msg->size);
974                         } else {
975                                 /* Return payload size. */
976                                 ret = msg->size;
977                         }
978                 }
979                 break;
980
981         case DP_AUX_NATIVE_READ:
982         case DP_AUX_I2C_READ:
983                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
984                 rxsize = msg->size + 1;
985
986                 if (WARN_ON(rxsize > 20))
987                         return -E2BIG;
988
989                 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
990                 if (ret > 0) {
991                         msg->reply = rxbuf[0] >> 4;
992                         /*
993                          * Assume happy day, and copy the data. The caller is
994                          * expected to check msg->reply before touching it.
995                          *
996                          * Return payload size.
997                          */
998                         ret--;
999                         memcpy(msg->buffer, rxbuf + 1, ret);
1000                 }
1001                 break;
1002
1003         default:
1004                 ret = -EINVAL;
1005                 break;
1006         }
1007
1008         return ret;
1009 }
1010
/*
 * intel_dp_aux_init - resolve the AUX channel register and register the
 * DP AUX bus for this port
 *
 * Picks the AUX_CH_CTL register for the port (honouring the VBT
 * alternate-aux-channel indirection for SKL/KBL port E, which has no AUX
 * channel of its own), wires up the drm_dp_aux transfer hook, registers
 * the aux device and creates a sysfs link from the connector to the
 * i2c-over-AUX adapter.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the ddc adapter under the connector's sysfs directory;
	 * removed again in intel_dp_connector_unregister(). */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1102
/*
 * Remove the connector's sysfs link to the AUX ddc device (MST ports
 * never had one created) and unregister the connector.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1113
/*
 * skl_edp_set_pll_config - select DPLL0 and its link rate for SKL eDP
 *
 * Programs the CTRL1 override for DPLL0 with the rate code matching half
 * the port clock. A port clock not listed in the switch leaves only the
 * override bit set in ctrl1.
 */
static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	/* The switch value is the link clock: half the port clock. */
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	results in CDCLK change. Need to handle the change of CDCLK by
	disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
1159
1160 void
1161 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1162 {
1163         memset(&pipe_config->dpll_hw_state, 0,
1164                sizeof(pipe_config->dpll_hw_state));
1165
1166         switch (pipe_config->port_clock / 2) {
1167         case 81000:
1168                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1169                 break;
1170         case 135000:
1171                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1172                 break;
1173         case 270000:
1174                 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1175                 break;
1176         }
1177 }
1178
1179 static int
1180 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1181 {
1182         if (intel_dp->num_sink_rates) {
1183                 *sink_rates = intel_dp->sink_rates;
1184                 return intel_dp->num_sink_rates;
1185         }
1186
1187         *sink_rates = default_rates;
1188
1189         return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1190 }
1191
1192 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1193 {
1194         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1195         struct drm_device *dev = dig_port->base.base.dev;
1196
1197         /* WaDisableHBR2:skl */
1198         if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1199                 return false;
1200
1201         if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1202             (INTEL_INFO(dev)->gen >= 9))
1203                 return true;
1204         else
1205                 return false;
1206 }
1207
/*
 * intel_dp_source_rates - report the link rates this platform can drive
 * @source_rates: out: pointer to a platform rate table, ascending
 *
 * Returns the number of usable entries. When the source lacks HBR2, the
 * last table entry (5.4 GHz by construction of the tables) is masked off
 * by shrinking the count.
 */
static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is last value in the array */
	if (!intel_dp_source_supports_hbr2(intel_dp))
		size--;

	return size;
}
1232
1233 static void
1234 intel_dp_set_clock(struct intel_encoder *encoder,
1235                    struct intel_crtc_state *pipe_config)
1236 {
1237         struct drm_device *dev = encoder->base.dev;
1238         const struct dp_link_dpll *divisor = NULL;
1239         int i, count = 0;
1240
1241         if (IS_G4X(dev)) {
1242                 divisor = gen4_dpll;
1243                 count = ARRAY_SIZE(gen4_dpll);
1244         } else if (HAS_PCH_SPLIT(dev)) {
1245                 divisor = pch_dpll;
1246                 count = ARRAY_SIZE(pch_dpll);
1247         } else if (IS_CHERRYVIEW(dev)) {
1248                 divisor = chv_dpll;
1249                 count = ARRAY_SIZE(chv_dpll);
1250         } else if (IS_VALLEYVIEW(dev)) {
1251                 divisor = vlv_dpll;
1252                 count = ARRAY_SIZE(vlv_dpll);
1253         }
1254
1255         if (divisor && count) {
1256                 for (i = 0; i < count; i++) {
1257                         if (pipe_config->port_clock == divisor[i].clock) {
1258                                 pipe_config->dpll = divisor[i].dpll;
1259                                 pipe_config->clock_set = true;
1260                                 break;
1261                         }
1262                 }
1263         }
1264 }
1265
1266 static int intersect_rates(const int *source_rates, int source_len,
1267                            const int *sink_rates, int sink_len,
1268                            int *common_rates)
1269 {
1270         int i = 0, j = 0, k = 0;
1271
1272         while (i < source_len && j < sink_len) {
1273                 if (source_rates[i] == sink_rates[j]) {
1274                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1275                                 return k;
1276                         common_rates[k] = source_rates[i];
1277                         ++k;
1278                         ++i;
1279                         ++j;
1280                 } else if (source_rates[i] < sink_rates[j]) {
1281                         ++i;
1282                 } else {
1283                         ++j;
1284                 }
1285         }
1286         return k;
1287 }
1288
/*
 * intel_dp_common_rates - rates both source and sink can drive
 * @common_rates: out: ascending array, up to DP_MAX_SUPPORTED_RATES
 *
 * Returns the number of common rates.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *src_rates, *snk_rates;
	int src_len, snk_len;

	snk_len = intel_dp_sink_rates(intel_dp, &snk_rates);
	src_len = intel_dp_source_rates(intel_dp, &src_rates);

	return intersect_rates(src_rates, src_len,
			       snk_rates, snk_len,
			       common_rates);
}
1302
/*
 * snprintf_int_array - format @nelem ints as "a, b, c" into @str
 *
 * Writes as many comma-separated values as fit; output is always
 * NUL-terminated (possibly truncated mid-entry) when @len > 0.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	if (len == 0)
		return;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/* Stop on output error or truncation; the comparison is
		 * done in size_t after an explicit sign check, avoiding
		 * the old signed/unsigned mismatch. */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1318
/*
 * intel_dp_print_rates - log source, sink and common link rates
 *
 * Debug aid only; bails out early when KMS debugging is disabled so the
 * formatting work is skipped entirely.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1341
1342 static int rate_to_index(int find, const int *rates)
1343 {
1344         int i = 0;
1345
1346         for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1347                 if (find == rates[i])
1348                         break;
1349
1350         return i;
1351 }
1352
/*
 * intel_dp_max_link_rate - highest link rate usable by source and sink
 *
 * Falls back to RBR (162000 kHz) if, unexpectedly, no common rate
 * exists.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	/* rate_to_index(0, rates) counts the non-zero entries of the
	 * zero-initialized array; rates[] is ascending, so the last valid
	 * entry is the maximum. */
	return rates[rate_to_index(0, rates) - 1];
}
1365
/* Map @rate to its index in the sink-advertised rate table (used when
 * the sink selects link rates by index rather than by bw code). */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1370
/*
 * intel_dp_compute_rate - translate a port clock into link-rate settings
 * @link_bw: out: legacy link-bw code, or 0 when a rate table is in use
 * @rate_select: out: index into the sink's rate table, or 0
 *
 * Sinks that advertised an explicit rate table (num_sink_rates != 0) are
 * addressed by table index; all others use the classic bw code. Exactly
 * one of the two outputs is meaningful — the other is zeroed.
 */
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   uint8_t *link_bw, uint8_t *rate_select)
{
	if (intel_dp->num_sink_rates) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
1383
1384 bool
1385 intel_dp_compute_config(struct intel_encoder *encoder,
1386                         struct intel_crtc_state *pipe_config)
1387 {
1388         struct drm_device *dev = encoder->base.dev;
1389         struct drm_i915_private *dev_priv = dev->dev_private;
1390         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1391         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1392         enum port port = dp_to_dig_port(intel_dp)->port;
1393         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1394         struct intel_connector *intel_connector = intel_dp->attached_connector;
1395         int lane_count, clock;
1396         int min_lane_count = 1;
1397         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1398         /* Conveniently, the link BW constants become indices with a shift...*/
1399         int min_clock = 0;
1400         int max_clock;
1401         int bpp, mode_rate;
1402         int link_avail, link_clock;
1403         int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1404         int common_len;
1405         uint8_t link_bw, rate_select;
1406
1407         common_len = intel_dp_common_rates(intel_dp, common_rates);
1408
1409         /* No common link rates between source and sink */
1410         WARN_ON(common_len <= 0);
1411
1412         max_clock = common_len - 1;
1413
1414         if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1415                 pipe_config->has_pch_encoder = true;
1416
1417         pipe_config->has_dp_encoder = true;
1418         pipe_config->has_drrs = false;
1419         pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1420
1421         if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1422                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1423                                        adjusted_mode);
1424
1425                 if (INTEL_INFO(dev)->gen >= 9) {
1426                         int ret;
1427                         ret = skl_update_scaler_crtc(pipe_config);
1428                         if (ret)
1429                                 return ret;
1430                 }
1431
1432                 if (HAS_GMCH_DISPLAY(dev))
1433                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1434                                                  intel_connector->panel.fitting_mode);
1435                 else
1436                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1437                                                 intel_connector->panel.fitting_mode);
1438         }
1439
1440         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1441                 return false;
1442
1443         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1444                       "max bw %d pixel clock %iKHz\n",
1445                       max_lane_count, common_rates[max_clock],
1446                       adjusted_mode->crtc_clock);
1447
1448         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1449          * bpc in between. */
1450         bpp = pipe_config->pipe_bpp;
1451         if (is_edp(intel_dp)) {
1452
1453                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1454                 if (intel_connector->base.display_info.bpc == 0 &&
1455                         (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1456                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1457                                       dev_priv->vbt.edp_bpp);
1458                         bpp = dev_priv->vbt.edp_bpp;
1459                 }
1460
1461                 /*
1462                  * Use the maximum clock and number of lanes the eDP panel
1463                  * advertizes being capable of. The panels are generally
1464                  * designed to support only a single clock and lane
1465                  * configuration, and typically these values correspond to the
1466                  * native resolution of the panel.
1467                  */
1468                 min_lane_count = max_lane_count;
1469                 min_clock = max_clock;
1470         }
1471
1472         for (; bpp >= 6*3; bpp -= 2*3) {
1473                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1474                                                    bpp);
1475
1476                 for (clock = min_clock; clock <= max_clock; clock++) {
1477                         for (lane_count = min_lane_count;
1478                                 lane_count <= max_lane_count;
1479                                 lane_count <<= 1) {
1480
1481                                 link_clock = common_rates[clock];
1482                                 link_avail = intel_dp_max_data_rate(link_clock,
1483                                                                     lane_count);
1484
1485                                 if (mode_rate <= link_avail) {
1486                                         goto found;
1487                                 }
1488                         }
1489                 }
1490         }
1491
1492         return false;
1493
1494 found:
1495         if (intel_dp->color_range_auto) {
1496                 /*
1497                  * See:
1498                  * CEA-861-E - 5.1 Default Encoding Parameters
1499                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1500                  */
1501                 pipe_config->limited_color_range =
1502                         bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1503         } else {
1504                 pipe_config->limited_color_range =
1505                         intel_dp->limited_color_range;
1506         }
1507
1508         pipe_config->lane_count = lane_count;
1509
1510         pipe_config->pipe_bpp = bpp;
1511         pipe_config->port_clock = common_rates[clock];
1512
1513         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1514                               &link_bw, &rate_select);
1515
1516         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1517                       link_bw, rate_select, pipe_config->lane_count,
1518                       pipe_config->port_clock, bpp);
1519         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1520                       mode_rate, link_avail);
1521
1522         intel_link_compute_m_n(bpp, lane_count,
1523                                adjusted_mode->crtc_clock,
1524                                pipe_config->port_clock,
1525                                &pipe_config->dp_m_n);
1526
1527         if (intel_connector->panel.downclock_mode != NULL &&
1528                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1529                         pipe_config->has_drrs = true;
1530                         intel_link_compute_m_n(bpp, lane_count,
1531                                 intel_connector->panel.downclock_mode->clock,
1532                                 pipe_config->port_clock,
1533                                 &pipe_config->dp_m2_n2);
1534         }
1535
1536         if ((IS_SKYLAKE(dev)  || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1537                 skl_edp_set_pll_config(pipe_config);
1538         else if (IS_BROXTON(dev))
1539                 /* handled in ddi */;
1540         else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1541                 hsw_dp_set_ddi_pll_sel(pipe_config);
1542         else
1543                 intel_dp_set_clock(encoder, pipe_config);
1544
1545         return true;
1546 }
1547
/*
 * ironlake_set_pll_cpu_edp - program the CPU eDP PLL frequency select
 *
 * Writes DP_A's PLL frequency field (162 vs 270 MHz, chosen from the
 * crtc's port clock) and mirrors the choice into the cached DP register
 * value. The delay after the posting read gives the PLL time to react —
 * presumably a settle requirement; not documented here.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		dpa_ctl |= DP_PLL_FREQ_162MHZ;
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	} else {
		/* Any non-162000 clock gets the 270 MHz setting */
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}
1574
1575 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1576                               const struct intel_crtc_state *pipe_config)
1577 {
1578         intel_dp->link_rate = pipe_config->port_clock;
1579         intel_dp->lane_count = pipe_config->lane_count;
1580 }
1581
/*
 * Build the DP port control value (cached in intel_dp->DP) for the pending
 * modeset and program related transcoder state where needed. The port
 * register itself is not enabled here; that happens later in the enable
 * sequence, which writes out the value assembled below.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

        intel_dp_set_link_params(intel_dp, crtc->config);

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

        if (crtc->config->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

        /* Split out the IBX/CPU vs CPT settings */

        if (IS_GEN7(dev) && port == PORT_A) {
                /* IVB CPU eDP: sync polarity lives in the port register,
                 * but link training bits use the CPT layout. */
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                /* Pipe select for IVB CPU eDP sits at bit 29. */
                intel_dp->DP |= crtc->pipe << 29;
        } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
                u32 trans_dp;

                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                /* On CPT enhanced framing is configured in TRANS_DP_CTL,
                 * not in the port register. */
                trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        trans_dp |= TRANS_DP_ENH_FRAMING;
                else
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                /* IBX PCH / SNB CPU / VLV/CHV layout. */
                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
                    crtc->config->limited_color_range)
                        intel_dp->DP |= DP_COLOR_RANGE_16_235;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (IS_CHERRYVIEW(dev))
                        intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
                else if (crtc->pipe == PIPE_B)
                        intel_dp->DP |= DP_PIPEB_SELECT;
        }
}
1666
/*
 * Mask/value pairs for wait_panel_status(): each pair selects the PP_STATUS
 * bits to watch and the value indicating that the panel power sequencer has
 * settled in the corresponding steady state (on, off, or ready for a new
 * power cycle). The literal 0 columns keep the three definitions aligned.
 */
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1675
/*
 * Poll the panel power status register until (status & mask) == value, or
 * until the 5 second timeout expires (10 us poll interval). A timeout only
 * logs an error; the caller proceeds regardless. Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                        mask, value,
                        I915_READ(pp_stat_reg),
                        I915_READ(pp_ctrl_reg));

        if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                                I915_READ(pp_stat_reg),
                                I915_READ(pp_ctrl_reg));
        }

        DRM_DEBUG_KMS("Wait complete\n");
}
1702
/* Block until the power sequencer reports the panel fully powered on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1708
/* Block until the power sequencer reports the panel fully powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1714
/*
 * Wait until the panel may be powered on again: first honour the remaining
 * software-tracked power cycle delay, then wait for the hardware sequencer
 * to reach the off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
                                       intel_dp->panel_power_cycle_delay);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1726
/* Honour the remaining panel-power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
1732
/* Honour the remaining backlight-off -> panel-power-off delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
1738
/* Read the current pp_control value, unlocking the register if it
 * is locked.
 *
 * Returns the PP_CONTROL value with the write-protect key already set to
 * the unlock magic, so callers can modify and write it back directly.
 * Broxton has no register lock, hence no key. Caller must hold pps_mutex.
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 control;

        lockdep_assert_held(&dev_priv->pps_mutex);

        control = I915_READ(_pp_ctrl_reg(intel_dp));
        if (!IS_BROXTON(dev)) {
                control &= ~PANEL_UNLOCK_MASK;
                control |= PANEL_UNLOCK_REGS;
        }
        return control;
}
1758
1759 /*
1760  * Must be paired with edp_panel_vdd_off().
1761  * Must hold pps_mutex around the whole on/off sequence.
1762  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1763  */
1764 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1765 {
1766         struct drm_device *dev = intel_dp_to_dev(intel_dp);
1767         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1768         struct intel_encoder *intel_encoder = &intel_dig_port->base;
1769         struct drm_i915_private *dev_priv = dev->dev_private;
1770         enum intel_display_power_domain power_domain;
1771         u32 pp;
1772         u32 pp_stat_reg, pp_ctrl_reg;
1773         bool need_to_disable = !intel_dp->want_panel_vdd;
1774
1775         lockdep_assert_held(&dev_priv->pps_mutex);
1776
1777         if (!is_edp(intel_dp))
1778                 return false;
1779
1780         cancel_delayed_work(&intel_dp->panel_vdd_work);
1781         intel_dp->want_panel_vdd = true;
1782
1783         if (edp_have_panel_vdd(intel_dp))
1784                 return need_to_disable;
1785
1786         power_domain = intel_display_port_power_domain(intel_encoder);
1787         intel_display_power_get(dev_priv, power_domain);
1788
1789         DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1790                       port_name(intel_dig_port->port));
1791
1792         if (!edp_have_panel_power(intel_dp))
1793                 wait_panel_power_cycle(intel_dp);
1794
1795         pp = ironlake_get_pp_control(intel_dp);
1796         pp |= EDP_FORCE_VDD;
1797
1798         pp_stat_reg = _pp_stat_reg(intel_dp);
1799         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1800
1801         I915_WRITE(pp_ctrl_reg, pp);
1802         POSTING_READ(pp_ctrl_reg);
1803         DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1804                         I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1805         /*
1806          * If the panel wasn't on, delay before accessing aux channel
1807          */
1808         if (!edp_have_panel_power(intel_dp)) {
1809                 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1810                               port_name(intel_dig_port->port));
1811                 msleep(intel_dp->panel_power_up_delay);
1812         }
1813
1814         return need_to_disable;
1815 }
1816
1817 /*
1818  * Must be paired with intel_edp_panel_vdd_off() or
1819  * intel_edp_panel_off().
1820  * Nested calls to these functions are not allowed since
1821  * we drop the lock. Caller must use some higher level
1822  * locking to prevent nested calls from other threads.
1823  */
1824 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1825 {
1826         bool vdd;
1827
1828         if (!is_edp(intel_dp))
1829                 return;
1830
1831         pps_lock(intel_dp);
1832         vdd = edp_panel_vdd_on(intel_dp);
1833         pps_unlock(intel_dp);
1834
1835         I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1836              port_name(dp_to_dig_port(intel_dp)->port));
1837 }
1838
/*
 * Actually turn the forced panel VDD off and drop the power domain
 * reference taken in edp_panel_vdd_on(). Caller must hold pps_mutex and
 * must have cleared want_panel_vdd first.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
                      port_name(intel_dig_port->port));

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        /* Dropping VDD with panel power already off starts a power cycle. */
        if ((pp & POWER_TARGET_ON) == 0)
                intel_dp->last_power_cycle = jiffies;

        /* Drop the reference taken when VDD was enabled. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
1879
/*
 * Deferred VDD-off worker: turns VDD off unless someone re-requested it
 * in the meantime (want_panel_vdd set again under pps_mutex).
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);

        pps_lock(intel_dp);
        if (!intel_dp->want_panel_vdd)
                edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
1890
/* Schedule the deferred VDD-off work well after the power cycle delay. */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
        unsigned long delay;

        /*
         * Queue the timer to fire a long time from now (relative to the power
         * down delay) to keep the panel power up across a sequence of
         * operations.
         */
        delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
        schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
1903
1904 /*
1905  * Must be paired with edp_panel_vdd_on().
1906  * Must hold pps_mutex around the whole on/off sequence.
1907  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1908  */
1909 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1910 {
1911         struct drm_i915_private *dev_priv =
1912                 intel_dp_to_dev(intel_dp)->dev_private;
1913
1914         lockdep_assert_held(&dev_priv->pps_mutex);
1915
1916         if (!is_edp(intel_dp))
1917                 return;
1918
1919         I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1920              port_name(dp_to_dig_port(intel_dp)->port));
1921
1922         intel_dp->want_panel_vdd = false;
1923
1924         if (sync)
1925                 edp_panel_vdd_off_sync(intel_dp);
1926         else
1927                 edp_panel_vdd_schedule_off(intel_dp);
1928 }
1929
/*
 * Turn the eDP panel power on via the power sequencer and wait until it
 * reports the on state. Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->port)))
                return;

        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Timestamp for the backlight-on delay (wait_backlight_on()). */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}
1977
/* Public, self-locking wrapper around edp_panel_on(); no-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
1987
1988
/*
 * Turn the eDP panel power off (also dropping forced VDD in the same
 * register write), wait for the off state, and release the power domain
 * reference that was taken when VDD was enabled. Caller must hold
 * pps_mutex and must have VDD forced on.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;
        u32 pp;
        u32 pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Timestamp for the power cycle delay (wait_panel_power_cycle()). */
        intel_dp->last_power_cycle = jiffies;
        wait_panel_off(intel_dp);

        /* We got a reference when we enabled the VDD. */
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_put(dev_priv, power_domain);
}
2030
/* Public, self-locking wrapper around edp_panel_off(); no-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
2040
/* Enable backlight in the panel power control. Takes pps_mutex itself,
 * so callers must not hold it. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);

        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);
}
2070
/* Enable backlight PWM and backlight PP control. PWM is enabled first so
 * the panel power control enable reveals a working backlight. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        intel_panel_enable_backlight(intel_dp->attached_connector);
        _intel_edp_backlight_on(intel_dp);
}
2082
/* Disable backlight in the panel power control. Takes pps_mutex itself,
 * so callers must not hold it. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);

        /* Timestamp for the backlight-off delay (edp_wait_backlight_off()). */
        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
}
2109
/* Disable backlight PP control and backlight PWM. Mirror image of
 * intel_edp_backlight_on(): PP control first, then PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        _intel_edp_backlight_off(intel_dp);
        intel_panel_disable_backlight(intel_dp->attached_connector);
}
2121
2122 /*
2123  * Hook for controlling the panel power control backlight through the bl_power
2124  * sysfs attribute. Take care to handle multiple calls.
2125  */
2126 static void intel_edp_backlight_power(struct intel_connector *connector,
2127                                       bool enable)
2128 {
2129         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2130         bool is_enabled;
2131
2132         pps_lock(intel_dp);
2133         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2134         pps_unlock(intel_dp);
2135
2136         if (is_enabled == enable)
2137                 return;
2138
2139         DRM_DEBUG_KMS("panel power control backlight %s\n",
2140                       enable ? "enable" : "disable");
2141
2142         if (enable)
2143                 _intel_edp_backlight_on(intel_dp);
2144         else
2145                 _intel_edp_backlight_off(intel_dp);
2146 }
2147
/* Map an enable flag to the human readable state name for assert messages. */
static const char *state_string(bool enabled)
{
        if (enabled)
                return "on";

        return "off";
}
2152
/*
 * Assert that the DP port enable bit matches the expected state, warning
 * with port name and both states otherwise.
 */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

        I915_STATE_WARN(cur_state != state,
                        "DP port %c state assertion failure (expected %s, current %s)\n",
                        port_name(dig_port->port),
                        state_string(state), state_string(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2165
/* Assert that the eDP PLL enable bit in DP_A matches the expected state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
        bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

        I915_STATE_WARN(cur_state != state,
                        "eDP PLL state assertion failure (expected %s, current %s)\n",
                        state_string(state), state_string(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2176
2177 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2178 {
2179         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2180         struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2181         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2182         u32 dpa_ctl;
2183
2184         assert_pipe_disabled(dev_priv, crtc->pipe);
2185         assert_dp_port_disabled(intel_dp);
2186         assert_edp_pll_disabled(dev_priv);
2187
2188         DRM_DEBUG_KMS("\n");
2189         dpa_ctl = I915_READ(DP_A);
2190
2191         /* We don't adjust intel_dp->DP while tearing down the link, to
2192          * facilitate link retraining (e.g. after hotplug). Hence clear all
2193          * enable bits here to ensure that we don't enable too much. */
2194         intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2195         intel_dp->DP |= DP_PLL_ENABLE;
2196         I915_WRITE(DP_A, intel_dp->DP);
2197         POSTING_READ(DP_A);
2198         udelay(200);
2199 }
2200
/*
 * Disable the eDP PLL via the CPU DP_A register. Must be called with the
 * pipe and DP port disabled but the PLL still enabled (asserted below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv, crtc->pipe);
        assert_dp_port_disabled(intel_dp);
        assert_edp_pll_enabled(dev_priv);

        dpa_ctl = I915_READ(DP_A);

        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail. */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        /* Give the PLL time to settle after disabling it. */
        udelay(200);
}
2222
/* If the sink supports it, try to set the power state appropriately
 * (D0 for on, D3 for anything else) via the DPCD SET_POWER register. */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
        int ret, i;

        /* Should have a valid DPCD by this point; SET_POWER only exists
         * from DPCD revision 1.1 onwards. */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DRM_MODE_DPMS_ON) {
                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                         DP_SET_POWER_D3);
        } else {
                /*
                 * When turning on, we need to retry for 1ms to give the sink
                 * time to wake up.
                 */
                for (i = 0; i < 3; i++) {
                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                 DP_SET_POWER_D0);
                        if (ret == 1)
                                break;
                        msleep(1);
                }
        }

        /* ret == 1 means the single DPCD byte was written successfully. */
        if (ret != 1)
                DRM_DEBUG_KMS("failed to %s sink power state\n",
                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2253
2254 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2255                                   enum pipe *pipe)
2256 {
2257         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2258         enum port port = dp_to_dig_port(intel_dp)->port;
2259         struct drm_device *dev = encoder->base.dev;
2260         struct drm_i915_private *dev_priv = dev->dev_private;
2261         enum intel_display_power_domain power_domain;
2262         u32 tmp;
2263
2264         power_domain = intel_display_port_power_domain(encoder);
2265         if (!intel_display_power_is_enabled(dev_priv, power_domain))
2266                 return false;
2267
2268         tmp = I915_READ(intel_dp->output_reg);
2269
2270         if (!(tmp & DP_PORT_EN))
2271                 return false;
2272
2273         if (IS_GEN7(dev) && port == PORT_A) {
2274                 *pipe = PORT_TO_PIPE_CPT(tmp);
2275         } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2276                 enum pipe p;
2277
2278                 for_each_pipe(dev_priv, p) {
2279                         u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2280                         if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2281                                 *pipe = p;
2282                                 return true;
2283                         }
2284                 }
2285
2286                 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2287                               intel_dp->output_reg);
2288         } else if (IS_CHERRYVIEW(dev)) {
2289                 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2290         } else {
2291                 *pipe = PORT_TO_PIPE(tmp);
2292         }
2293
2294         return true;
2295 }
2296
/*
 * Read back the DP-related parts of the pipe configuration from the
 * hardware registers into pipe_config (sync polarity, audio, color range,
 * lane count, m/n values, port clock and dotclock).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        int dotclock;

        tmp = I915_READ(intel_dp->output_reg);

        pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

        /* On CPT, sync polarity lives in TRANS_DP_CTL; elsewhere it's in
         * the port register itself. */
        if (HAS_PCH_CPT(dev) && port != PORT_A) {
                u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

                if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->base.adjusted_mode.flags |= flags;

        if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
            tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->has_dp_encoder = true;

        pipe_config->lane_count =
                ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

        intel_dp_get_m_n(crtc, pipe_config);

        /* CPU eDP (port A) encodes the link clock in DP_A. */
        if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                            &pipe_config->dp_m_n);

        if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
                ironlake_check_encoder_dotclock(pipe_config, dotclock);

        pipe_config->base.adjusted_mode.crtc_clock = dotclock;

        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
                dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
        }
}
2384
/*
 * Encoder disable hook: tear down audio/PSR, power the panel down in the
 * required order (VDD on -> backlight off -> sink D3 -> panel off), and on
 * pre-ILK hardware also shut the port off before the pipe.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->config->has_audio)
                intel_audio_codec_disable(encoder);

        if (HAS_PSR(dev) && !HAS_DDI(dev))
                intel_psr_disable(intel_dp);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);

        /* disable the port before the pipe on g4x */
        if (INTEL_INFO(dev)->gen < 5)
                intel_dp_link_down(intel_dp);
}
2408
/*
 * ILK+ post-disable hook: bring the link down after the pipe is off, and
 * for CPU eDP (port A) also disable the eDP PLL.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;

        intel_dp_link_down(intel_dp);
        if (port == PORT_A)
                ironlake_edp_pll_off(intel_dp);
}
2418
2419 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2420 {
2421         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2422
2423         intel_dp_link_down(intel_dp);
2424 }
2425
/*
 * Assert (reset == true) or deassert (reset == false) the CHV DPIO data
 * lane soft reset for this port.  PCS01 covers lanes 0/1 and is always
 * programmed; PCS23 (lanes 2/3) is only touched when more than two lanes
 * are in use.  Caller must hold dev_priv->sb_lock.
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	/* TX lane reset bits for lanes 0/1: cleared to assert, set to release. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	/* Same for lanes 2/3, present only in >2 lane configurations. */
	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	/* PCS clock soft reset, with the soft reset request enable always set. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
2469
2470 static void chv_post_disable_dp(struct intel_encoder *encoder)
2471 {
2472         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2473         struct drm_device *dev = encoder->base.dev;
2474         struct drm_i915_private *dev_priv = dev->dev_private;
2475
2476         intel_dp_link_down(intel_dp);
2477
2478         mutex_lock(&dev_priv->sb_lock);
2479
2480         /* Assert data lane reset */
2481         chv_data_lane_soft_reset(encoder, true);
2482
2483         mutex_unlock(&dev_priv->sb_lock);
2484 }
2485
/*
 * Translate a DP training pattern request (dp_train_pat, DPCD-style bits)
 * into the source-side register value.
 *
 * On DDI platforms the pattern lives in DP_TP_CTL, which is read,
 * modified and written back here directly.  On all other platforms only
 * the in-memory *DP value is updated; the caller is responsible for
 * writing it to the port register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		/* Scrambling is controlled separately from the pattern bits. */
		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT-style link training bits (also gen7 CPU eDP). */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Pattern 3 unsupported here; fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Legacy / VLV / CHV layout of the link training bits. */
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Only CHV has a pattern 3 encoding in this layout. */
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2569
/*
 * Enable the DP port with training pattern 1 selected, using the
 * two-step write sequence VLV/CHV require (see the comment below).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	/* First write: full register setup but with DP_PORT_EN still clear. */
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2593
/*
 * Core DP enable path: bring up the port, run the panel power sequence
 * (under the pps lock), wait for VLV/CHV PHY readiness, then link train
 * and finally enable audio.  Bails out if the port is already enabled.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* On VLV/CHV the power sequencer is assigned per pipe at enable time. */
	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * Underrun reporting for the other pipe was disabled in
		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
		 * enabled, so it's now safe to re-enable underrun reporting.
		 */
		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
	}

	/* Panel power on, with vdd held only across the sequence. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		/* CHV can leave unused lanes powered down; VLV waits on all. */
		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder);
	}
}
2651
2652 static void g4x_enable_dp(struct intel_encoder *encoder)
2653 {
2654         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2655
2656         intel_enable_dp(encoder);
2657         intel_edp_backlight_on(intel_dp);
2658 }
2659
2660 static void vlv_enable_dp(struct intel_encoder *encoder)
2661 {
2662         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2663
2664         intel_edp_backlight_on(intel_dp);
2665         intel_psr_enable(intel_dp);
2666 }
2667
/*
 * g4x/ilk pre-enable hook: program the port register, suppress FIFO
 * underrun reporting on the other pipe around CPU eDP PLL/port bring-up
 * on gen5, and turn on the eDP PLL for port A.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;

	intel_dp_prepare(encoder);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * We get FIFO underruns on the other pipe when
		 * enabling the CPU eDP PLL, and when enabling CPU
		 * eDP port. We could potentially avoid the PLL
		 * underrun with a vblank wait just prior to enabling
		 * the PLL, but that doesn't appear to help the port
		 * enable case. Just sweep it all under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
	}

	/* Only ilk+ has port A */
	if (port == PORT_A) {
		ironlake_set_pll_cpu_edp(intel_dp);
		ironlake_edp_pll_on(intel_dp);
	}
}
2696
/*
 * Disconnect the power sequencer currently assigned to this eDP port:
 * sync off vdd first, clear the port select in the pipe's PP_ON_DELAYS
 * register, and mark the port as having no pps pipe.
 * Caller must hold pps_mutex (enforced by edp_panel_vdd_off_sync path).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2722
2723 static void vlv_steal_power_sequencer(struct drm_device *dev,
2724                                       enum pipe pipe)
2725 {
2726         struct drm_i915_private *dev_priv = dev->dev_private;
2727         struct intel_encoder *encoder;
2728
2729         lockdep_assert_held(&dev_priv->pps_mutex);
2730
2731         if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2732                 return;
2733
2734         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2735                             base.head) {
2736                 struct intel_dp *intel_dp;
2737                 enum port port;
2738
2739                 if (encoder->type != INTEL_OUTPUT_EDP)
2740                         continue;
2741
2742                 intel_dp = enc_to_intel_dp(&encoder->base);
2743                 port = dp_to_dig_port(intel_dp)->port;
2744
2745                 if (intel_dp->pps_pipe != pipe)
2746                         continue;
2747
2748                 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2749                               pipe_name(pipe), port_name(port));
2750
2751                 WARN(encoder->base.crtc,
2752                      "stealing pipe %c power sequencer from active eDP port %c\n",
2753                      pipe_name(pipe), port_name(port));
2754
2755                 /* make sure vdd is off before we steal it */
2756                 vlv_detach_power_sequencer(intel_dp);
2757         }
2758 }
2759
/*
 * Assign this eDP port's power sequencer to the CRTC pipe it is being
 * enabled on: detach any sequencer the port used before, steal the
 * target pipe's sequencer from another port if needed, then initialize
 * the sequencer state and registers for this pipe/port.
 * Caller must hold pps_mutex.  No-op for non-eDP ports.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Already using the right pipe's sequencer: nothing to do. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2800
/*
 * VLV pre-enable hook: program the DPIO PCS clock-channel/lane settings
 * for this port over the sideband bus, then run the common DP enable.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the value read here is discarded by the "val = 0"
	 * on the next line (which also makes the following "&=" a no-op).
	 * Presumably the read is kept only for a side effect, if any --
	 * confirm before removing.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2829
/*
 * VLV pre-PLL-enable hook: program the port register, then reset the
 * DPIO Tx lanes to their defaults and apply the inter-pair skew
 * workaround values over the sideband bus.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2859
/*
 * CHV pre-enable hook: program the DPIO PHY for this port (TX FIFO reset
 * source, per-lane latency/upar bits, data lane stagger scaled by link
 * clock), deassert the data lane reset, then run the common DP enable.
 * Finally drop the temporary second-common-lane override taken in
 * chv_dp_pre_pll_enable() once the lane can stay up on its own.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		/* Set the upar bit */
		if (intel_crtc->config->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming */
	/* Stagger value is picked from the port clock (kHz) bracket. */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (intel_crtc->config->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}
2948
/*
 * CHV pre-PLL-enable hook: power up the PHY lanes this port needs,
 * assert the data lane reset, and program left/right clock buffer
 * distribution plus clock channel usage for the pipe/channel pair,
 * all over the sideband bus.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
	u32 val;

	intel_dp_prepare(encoder);

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
3031
/*
 * CHV post-PLL-disable hook: undo the clock-distribution forcing set up
 * in chv_dp_pre_pll_enable(), then drop the PHY lane power overrides.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
3064
3065 /*
3066  * Native read with retry for link status and receiver capability reads for
3067  * cases where the sink may still be asleep.
3068  *
3069  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3070  * supposed to retry 3 times per the spec.
3071  */
3072 static ssize_t
3073 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3074                         void *buffer, size_t size)
3075 {
3076         ssize_t ret;
3077         int i;
3078
3079         /*
3080          * Sometime we just get the same incorrect byte repeated
3081          * over the entire buffer. Doing just one throw away read
3082          * initially seems to "solve" it.
3083          */
3084         drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3085
3086         for (i = 0; i < 3; i++) {
3087                 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3088                 if (ret == size)
3089                         return ret;
3090                 msleep(1);
3091         }
3092
3093         return ret;
3094 }
3095
3096 /*
3097  * Fetch AUX CH registers 0x202 - 0x207 which contain
3098  * link status information
3099  */
3100 bool
3101 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3102 {
3103         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3104                                        DP_LANE0_1_STATUS,
3105                                        link_status,
3106                                        DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3107 }
3108
3109 /* These are source-specific values. */
3110 uint8_t
3111 intel_dp_voltage_max(struct intel_dp *intel_dp)
3112 {
3113         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3114         struct drm_i915_private *dev_priv = dev->dev_private;
3115         enum port port = dp_to_dig_port(intel_dp)->port;
3116
3117         if (IS_BROXTON(dev))
3118                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3119         else if (INTEL_INFO(dev)->gen >= 9) {
3120                 if (dev_priv->edp_low_vswing && port == PORT_A)
3121                         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3122                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3123         } else if (IS_VALLEYVIEW(dev))
3124                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3125         else if (IS_GEN7(dev) && port == PORT_A)
3126                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3127         else if (HAS_PCH_CPT(dev) && port != PORT_A)
3128                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3129         else
3130                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3131 }
3132
3133 uint8_t
3134 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3135 {
3136         struct drm_device *dev = intel_dp_to_dev(intel_dp);
3137         enum port port = dp_to_dig_port(intel_dp)->port;
3138
3139         if (INTEL_INFO(dev)->gen >= 9) {
3140                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3141                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3142                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3143                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3144                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3145                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3146                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3147                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3148                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3149                 default:
3150                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3151                 }
3152         } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3153                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3154                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3155                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3156                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3157                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3158                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3159                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3160                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3161                 default:
3162                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3163                 }
3164         } else if (IS_VALLEYVIEW(dev)) {
3165                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3166                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3167                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3168                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3169                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3170                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3171                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3172                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3173                 default:
3174                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3175                 }
3176         } else if (IS_GEN7(dev) && port == PORT_A) {
3177                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3178                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3179                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3180                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3181                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3182                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3183                 default:
3184                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3185                 }
3186         } else {
3187                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3188                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3189                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3190                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3191                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3192                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3193                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3194                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3195                 default:
3196                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3197                 }
3198         }
3199 }
3200
/*
 * vlv_signal_levels - program the VLV DPIO PHY for the current train_set
 *
 * Looks up the DPIO de-emphasis / pre-emphasis / unique-transition-scale
 * register values matching the vswing + pre-emphasis pair in
 * intel_dp->train_set[0] and writes them through the sideband interface.
 *
 * Always returns 0: on VLV nothing has to be merged into the DP port
 * register by the caller, and unsupported swing/pre-emphasis combinations
 * are ignored without touching the hardware.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/*
	 * Select the magic DPIO values for the requested pair. Only the
	 * combinations listed here are valid (higher pre-emphasis levels
	 * allow fewer swing levels); anything else returns early.
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Sideband (DPIO) access is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	/*
	 * NOTE(review): TX_DW5 is cleared here and written to 0x80000000
	 * after the update - presumably gating the lanes while the swing
	 * registers change; confirm against the VLV DPIO documentation.
	 */
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3300
3301 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3302 {
3303         return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3304                 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3305 }
3306
/*
 * chv_signal_levels - program the CHV DPIO PHY for the current train_set
 *
 * Translates the vswing + pre-emphasis pair in intel_dp->train_set[0]
 * into de-emphasis and margin values and runs the full CHV swing
 * programming sequence over sideband: clear the swing calculation,
 * zero the margin deltas, program per-lane deemph/margin, select
 * unique transition scale where needed, then restart the calculation.
 *
 * Always returns 0: nothing needs to be merged into the DP port
 * register, and unsupported combinations are ignored without touching
 * the hardware.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* Look up deemph/margin for the requested pair; bail on invalid ones. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Sideband (DPIO) access is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* NOTE(review): PCS23 looks like it covers lanes 2/3 - hence the
	 * lane_count > 2 guard on every second block; confirm in docs. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	/* Zero the PCS margin deltas before programming per-lane margins. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3474
3475 static uint32_t
3476 gen4_signal_levels(uint8_t train_set)
3477 {
3478         uint32_t        signal_levels = 0;
3479
3480         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3481         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3482         default:
3483                 signal_levels |= DP_VOLTAGE_0_4;
3484                 break;
3485         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3486                 signal_levels |= DP_VOLTAGE_0_6;
3487                 break;
3488         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3489                 signal_levels |= DP_VOLTAGE_0_8;
3490                 break;
3491         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3492                 signal_levels |= DP_VOLTAGE_1_2;
3493                 break;
3494         }
3495         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3496         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3497         default:
3498                 signal_levels |= DP_PRE_EMPHASIS_0;
3499                 break;
3500         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3501                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3502                 break;
3503         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3504                 signal_levels |= DP_PRE_EMPHASIS_6;
3505                 break;
3506         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3507                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3508                 break;
3509         }
3510         return signal_levels;
3511 }
3512
3513 /* Gen6's DP voltage swing and pre-emphasis control */
3514 static uint32_t
3515 gen6_edp_signal_levels(uint8_t train_set)
3516 {
3517         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3518                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3519         switch (signal_levels) {
3520         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3521         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3522                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3523         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3524                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3525         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3526         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3527                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3528         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3529         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3530                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3531         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3532         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3533                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3534         default:
3535                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3536                               "0x%x\n", signal_levels);
3537                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3538         }
3539 }
3540
3541 /* Gen7's DP voltage swing and pre-emphasis control */
3542 static uint32_t
3543 gen7_edp_signal_levels(uint8_t train_set)
3544 {
3545         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3546                                          DP_TRAIN_PRE_EMPHASIS_MASK);
3547         switch (signal_levels) {
3548         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3549                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3550         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3551                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3552         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3553                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3554
3555         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3556                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3557         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3558                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3559
3560         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3561                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3562         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3563                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3564
3565         default:
3566                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3567                               "0x%x\n", signal_levels);
3568                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3569         }
3570 }
3571
/*
 * intel_dp_set_signal_levels - apply the current train_set to the port
 *
 * Converts intel_dp->train_set[0] into platform-specific signal-level
 * bits, merges them into the cached port register value intel_dp->DP
 * under @mask, and writes the result out with a posting read.
 *
 * For CHV/VLV the PHY is programmed via sideband inside the helpers
 * (they return 0) and on BXT the ddi_signal_levels() result is
 * discarded, so in those cases mask stays 0 and nothing is merged.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	/* dev_priv is referenced implicitly by the I915_WRITE() macro. */
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* BXT: don't merge anything into the port register. */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Only log the raw bits when they actually land in the register. */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3618
/*
 * intel_dp_program_link_training_pattern - program the training pattern
 * @intel_dp: DP encoder
 * @dp_train_pat: training pattern bits, handed to the platform hook
 *
 * Lets _intel_dp_set_link_train() update the cached port register
 * value in intel_dp->DP, then writes it to the hardware with a
 * posting read to flush the write.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	/* dev_priv is referenced implicitly by the I915_WRITE() macro. */
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3632
/*
 * intel_dp_set_idle_link_train - switch a DDI port to idle transmission
 *
 * No-op on non-DDI platforms. Programs DP_TP_CTL for idle pattern
 * transmission and, except on PORT_A (see comment below), waits for
 * the hardware to report idle-done in DP_TP_STATUS.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* 1 ms timeout; the status bit should flip almost immediately. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3663
/*
 * intel_dp_link_down - turn off a (pre-DDI) DP port
 *
 * Switches the link to the idle training pattern, disables the port
 * and audio output, applies the IBX transcoder-A workaround when
 * needed, and finally sleeps out the panel power-down delay.
 * WARNs and bails if called on a DDI platform or a port that is
 * already disabled.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/*
	 * First drop to the idle training pattern; the CPT-style
	 * registers use a different field layout than the rest.
	 */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Then actually disable the port and audio output. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);
}
3730
/*
 * intel_dp_get_dpcd - read and cache the sink's DPCD
 *
 * Fetches the DPCD receiver capability block into intel_dp->dpcd, then
 * probes PSR/PSR2 support (eDP only), the eDP 1.4+ supported link-rate
 * table, and the downstream port descriptors.
 *
 * Returns false when the aux transfer fails, the DPCD reads back as
 * all-zero revision, or the downstream port status fetch fails;
 * true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 is only considered on gen9+ sources. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The table is zero-terminated when shorter than max. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3820
3821 static void
3822 intel_dp_probe_oui(struct intel_dp *intel_dp)
3823 {
3824         u8 buf[3];
3825
3826         if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3827                 return;
3828
3829         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3830                 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3831                               buf[0], buf[1], buf[2]);
3832
3833         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3834                 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3835                               buf[0], buf[1], buf[2]);
3836 }
3837
3838 static bool
3839 intel_dp_probe_mst(struct intel_dp *intel_dp)
3840 {
3841         u8 buf[1];
3842
3843         if (!intel_dp->can_mst)
3844                 return false;
3845
3846         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3847                 return false;
3848
3849         if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3850                 if (buf[0] & DP_MST_CAP) {
3851                         DRM_DEBUG_KMS("Sink is MST capable\n");
3852                         intel_dp->is_mst = true;
3853                 } else {
3854                         DRM_DEBUG_KMS("Sink is not MST capable\n");
3855                         intel_dp->is_mst = false;
3856                 }
3857         }
3858
3859         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3860         return intel_dp->is_mst;
3861 }
3862
/*
 * Stop sink CRC generation by clearing DP_TEST_SINK_START, and re-enable
 * IPS (disabled by intel_dp_sink_crc_start()). Returns 0 or -EIO on an
 * AUX failure.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;

	/* Read-modify-write DP_TEST_SINK so only the start bit is cleared. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Note: started is only cleared on full success; on error the next
	 * crc_start() will retry the stop. */
	intel_dp->sink_crc.started = false;
 out:
	/* Re-enable IPS even on failure so the pipe isn't left degraded. */
	hsw_enable_ips(intel_crtc);
	return ret;
}
3888
/*
 * Start sink CRC generation: verify the sink supports TEST_CRC, disable
 * IPS (it would perturb pipe output), and set DP_TEST_SINK_START.
 * Returns 0, -EIO on AUX failure, or -ENOTTY if CRC is unsupported.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	/* Restart cleanly if a previous capture is still running. */
	if (intel_dp->sink_crc.started) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Record the sink's current CRC count so new CRCs can be detected. */
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* Paired with hsw_enable_ips() in intel_dp_sink_crc_stop(), or in
	 * the failure path just below. */
	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_dp->sink_crc.started = true;
	return 0;
}
3924
/*
 * Read a fresh 6-byte CRC from the sink into @crc. Polls once per vblank
 * (up to 6 attempts) until the sink's TEST_COUNT is non-zero and either
 * the count or the CRC bytes differ from the previous sample. Returns 0,
 * -EIO on AUX failure, or -ETIMEDOUT if no CRC was ever produced.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;
	bool old_equal_new;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

		/*
		 * Count might be reset during the loop. In this case
		 * last known count needs to be reset as well.
		 */
		if (count == 0)
			intel_dp->sink_crc.last_count = 0;

		/* Six consecutive CRC bytes starting at DP_TEST_CRC_R_CR. */
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
			ret = -EIO;
			goto stop;
		}

		/* "Fresh" means count or CRC changed since last sample. */
		old_equal_new = (count == intel_dp->sink_crc.last_count &&
				 !memcmp(intel_dp->sink_crc.last_crc, crc,
					 6 * sizeof(u8)));

	} while (--attempts && (count == 0 || old_equal_new));

	/* Cache this sample for the next invocation's comparison. */
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));

	if (attempts == 0) {
		if (old_equal_new) {
			/* Sink repeated an old CRC: warn but still return it. */
			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
		} else {
			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
			ret = -ETIMEDOUT;
			goto stop;
		}
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
3984
3985 static bool
3986 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3987 {
3988         return intel_dp_dpcd_read_wake(&intel_dp->aux,
3989                                        DP_DEVICE_SERVICE_IRQ_VECTOR,
3990                                        sink_irq_vector, 1) == 1;
3991 }
3992
3993 static bool
3994 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3995 {
3996         int ret;
3997
3998         ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3999                                              DP_SINK_COUNT_ESI,
4000                                              sink_irq_vector, 14);
4001         if (ret != 14)
4002                 return false;
4003
4004         return true;
4005 }
4006
4007 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4008 {
4009         uint8_t test_result = DP_TEST_ACK;
4010         return test_result;
4011 }
4012
4013 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4014 {
4015         uint8_t test_result = DP_TEST_NAK;
4016         return test_result;
4017 }
4018
4019 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4020 {
4021         uint8_t test_result = DP_TEST_NAK;
4022         struct intel_connector *intel_connector = intel_dp->attached_connector;
4023         struct drm_connector *connector = &intel_connector->base;
4024
4025         if (intel_connector->detect_edid == NULL ||
4026             connector->edid_corrupt ||
4027             intel_dp->aux.i2c_defer_count > 6) {
4028                 /* Check EDID read for NACKs, DEFERs and corruption
4029                  * (DP CTS 1.2 Core r1.1)
4030                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4031                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4032                  *    4.2.2.6 : EDID corruption detected
4033                  * Use failsafe mode for all cases
4034                  */
4035                 if (intel_dp->aux.i2c_nack_count > 0 ||
4036                         intel_dp->aux.i2c_defer_count > 0)
4037                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4038                                       intel_dp->aux.i2c_nack_count,
4039                                       intel_dp->aux.i2c_defer_count);
4040                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4041         } else {
4042                 struct edid *block = intel_connector->detect_edid;
4043
4044                 /* We have to write the checksum
4045                  * of the last block read
4046                  */
4047                 block += intel_connector->detect_edid->extensions;
4048
4049                 if (!drm_dp_dpcd_write(&intel_dp->aux,
4050                                         DP_TEST_EDID_CHECKSUM,
4051                                         &block->checksum,
4052                                         1))
4053                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4054
4055                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4056                 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4057         }
4058
4059         /* Set test active flag here so userspace doesn't interrupt things */
4060         intel_dp->compliance_test_active = 1;
4061
4062         return test_result;
4063 }
4064
4065 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4066 {
4067         uint8_t test_result = DP_TEST_NAK;
4068         return test_result;
4069 }
4070
4071 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4072 {
4073         uint8_t response = DP_TEST_NAK;
4074         uint8_t rxdata = 0;
4075         int status = 0;
4076
4077         intel_dp->compliance_test_active = 0;
4078         intel_dp->compliance_test_type = 0;
4079         intel_dp->compliance_test_data = 0;
4080
4081         intel_dp->aux.i2c_nack_count = 0;
4082         intel_dp->aux.i2c_defer_count = 0;
4083
4084         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4085         if (status <= 0) {
4086                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4087                 goto update_status;
4088         }
4089
4090         switch (rxdata) {
4091         case DP_TEST_LINK_TRAINING:
4092                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4093                 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4094                 response = intel_dp_autotest_link_training(intel_dp);
4095                 break;
4096         case DP_TEST_LINK_VIDEO_PATTERN:
4097                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4098                 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4099                 response = intel_dp_autotest_video_pattern(intel_dp);
4100                 break;
4101         case DP_TEST_LINK_EDID_READ:
4102                 DRM_DEBUG_KMS("EDID test requested\n");
4103                 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4104                 response = intel_dp_autotest_edid(intel_dp);
4105                 break;
4106         case DP_TEST_LINK_PHY_TEST_PATTERN:
4107                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4108                 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4109                 response = intel_dp_autotest_phy_pattern(intel_dp);
4110                 break;
4111         default:
4112                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4113                 break;
4114         }
4115
4116 update_status:
4117         status = drm_dp_dpcd_write(&intel_dp->aux,
4118                                    DP_TEST_RESPONSE,
4119                                    &response, 1);
4120         if (status <= 0)
4121                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4122 }
4123
/*
 * Service MST sink event (ESI) interrupts. Loops via the go_again label as
 * long as new events keep arriving. On ESI read failure, MST is torn down
 * and a hotplug event is sent. Returns drm_dp_mst_hpd_irq()'s result (or 0
 * if nothing was handled), -EINVAL when not in MST mode or ESI read failed.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events back to the sink;
				 * retry since AUX writes can fail transiently. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived while servicing;
				 * re-read ESI and go around again if so. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4180
4181 /*
4182  * According to DP spec
4183  * 5.1.2:
4184  *  1. Read DPCD
4185  *  2. Configure link according to Receiver Capabilities
4186  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4187  *  4. Check link status on receipt of hot-plug interrupt
4188  */
4189 static void
4190 intel_dp_check_link_status(struct intel_dp *intel_dp)
4191 {
4192         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4193         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4194         u8 sink_irq_vector;
4195         u8 link_status[DP_LINK_STATUS_SIZE];
4196
4197         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4198
4199         if (!intel_encoder->base.crtc)
4200                 return;
4201
4202         if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4203                 return;
4204
4205         /* Try to read receiver status if the link appears to be up */
4206         if (!intel_dp_get_link_status(intel_dp, link_status)) {
4207                 return;
4208         }
4209
4210         /* Now read the DPCD to see if it's actually running */
4211         if (!intel_dp_get_dpcd(intel_dp)) {
4212                 return;
4213         }
4214
4215         /* Try to read the source of the interrupt */
4216         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4217             intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4218                 /* Clear interrupt source */
4219                 drm_dp_dpcd_writeb(&intel_dp->aux,
4220                                    DP_DEVICE_SERVICE_IRQ_VECTOR,
4221                                    sink_irq_vector);
4222
4223                 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4224                         DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4225                 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4226                         DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4227         }
4228
4229         if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4230                 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4231                               intel_encoder->base.name);
4232                 intel_dp_start_link_train(intel_dp);
4233                 intel_dp_stop_link_train(intel_dp);
4234         }
4235 }
4236
4237 /* XXX this is probably wrong for multiple downstream ports */
4238 static enum drm_connector_status
4239 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4240 {
4241         uint8_t *dpcd = intel_dp->dpcd;
4242         uint8_t type;
4243
4244         if (!intel_dp_get_dpcd(intel_dp))
4245                 return connector_status_disconnected;
4246
4247         /* if there's no downstream port, we're done */
4248         if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4249                 return connector_status_connected;
4250
4251         /* If we're HPD-aware, SINK_COUNT changes dynamically */
4252         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4253             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4254                 uint8_t reg;
4255
4256                 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4257                                             &reg, 1) < 0)
4258                         return connector_status_unknown;
4259
4260                 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4261                                               : connector_status_disconnected;
4262         }
4263
4264         /* If no HPD, poke DDC gently */
4265         if (drm_probe_ddc(&intel_dp->aux.ddc))
4266                 return connector_status_connected;
4267
4268         /* Well we tried, say unknown for unreliable port types */
4269         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4270                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4271                 if (type == DP_DS_PORT_TYPE_VGA ||
4272                     type == DP_DS_PORT_TYPE_NON_EDID)
4273                         return connector_status_unknown;
4274         } else {
4275                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4276                         DP_DWN_STRM_PORT_TYPE_MASK;
4277                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4278                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
4279                         return connector_status_unknown;
4280         }
4281
4282         /* Anything else is out of spec, warn and ignore */
4283         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4284         return connector_status_disconnected;
4285 }
4286
4287 static enum drm_connector_status
4288 edp_detect(struct intel_dp *intel_dp)
4289 {
4290         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4291         enum drm_connector_status status;
4292
4293         status = intel_panel_detect(dev);
4294         if (status == connector_status_unknown)
4295                 status = connector_status_connected;
4296
4297         return status;
4298 }
4299
4300 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4301                                        struct intel_digital_port *port)
4302 {
4303         u32 bit;
4304
4305         switch (port->port) {
4306         case PORT_A:
4307                 return true;
4308         case PORT_B:
4309                 bit = SDE_PORTB_HOTPLUG;
4310                 break;
4311         case PORT_C:
4312                 bit = SDE_PORTC_HOTPLUG;
4313                 break;
4314         case PORT_D:
4315                 bit = SDE_PORTD_HOTPLUG;
4316                 break;
4317         default:
4318                 MISSING_CASE(port->port);
4319                 return false;
4320         }
4321
4322         return I915_READ(SDEISR) & bit;
4323 }
4324
4325 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4326                                        struct intel_digital_port *port)
4327 {
4328         u32 bit;
4329
4330         switch (port->port) {
4331         case PORT_A:
4332                 return true;
4333         case PORT_B:
4334                 bit = SDE_PORTB_HOTPLUG_CPT;
4335                 break;
4336         case PORT_C:
4337                 bit = SDE_PORTC_HOTPLUG_CPT;
4338                 break;
4339         case PORT_D:
4340                 bit = SDE_PORTD_HOTPLUG_CPT;
4341                 break;
4342         case PORT_E:
4343                 bit = SDE_PORTE_HOTPLUG_SPT;
4344                 break;
4345         default:
4346                 MISSING_CASE(port->port);
4347                 return false;
4348         }
4349
4350         return I915_READ(SDEISR) & bit;
4351 }
4352
4353 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4354                                        struct intel_digital_port *port)
4355 {
4356         u32 bit;
4357
4358         switch (port->port) {
4359         case PORT_B:
4360                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4361                 break;
4362         case PORT_C:
4363                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4364                 break;
4365         case PORT_D:
4366                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4367                 break;
4368         default:
4369                 MISSING_CASE(port->port);
4370                 return false;
4371         }
4372
4373         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4374 }
4375
4376 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4377                                        struct intel_digital_port *port)
4378 {
4379         u32 bit;
4380
4381         switch (port->port) {
4382         case PORT_B:
4383                 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4384                 break;
4385         case PORT_C:
4386                 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4387                 break;
4388         case PORT_D:
4389                 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4390                 break;
4391         default:
4392                 MISSING_CASE(port->port);
4393                 return false;
4394         }
4395
4396         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4397 }
4398
4399 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4400                                        struct intel_digital_port *intel_dig_port)
4401 {
4402         struct intel_encoder *intel_encoder = &intel_dig_port->base;
4403         enum port port;
4404         u32 bit;
4405
4406         intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4407         switch (port) {
4408         case PORT_A:
4409                 bit = BXT_DE_PORT_HP_DDIA;
4410                 break;
4411         case PORT_B:
4412                 bit = BXT_DE_PORT_HP_DDIB;
4413                 break;
4414         case PORT_C:
4415                 bit = BXT_DE_PORT_HP_DDIC;
4416                 break;
4417         default:
4418                 MISSING_CASE(port);
4419                 return false;
4420         }
4421
4422         return I915_READ(GEN8_DE_PORT_ISR) & bit;
4423 }
4424
4425 /*
4426  * intel_digital_port_connected - is the specified port connected?
4427  * @dev_priv: i915 private structure
4428  * @port: the port to test
4429  *
4430  * Return %true if @port is connected, %false otherwise.
4431  */
4432 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4433                                          struct intel_digital_port *port)
4434 {
4435         if (HAS_PCH_IBX(dev_priv))
4436                 return ibx_digital_port_connected(dev_priv, port);
4437         if (HAS_PCH_SPLIT(dev_priv))
4438                 return cpt_digital_port_connected(dev_priv, port);
4439         else if (IS_BROXTON(dev_priv))
4440                 return bxt_digital_port_connected(dev_priv, port);
4441         else if (IS_VALLEYVIEW(dev_priv))
4442                 return vlv_digital_port_connected(dev_priv, port);
4443         else
4444                 return g4x_digital_port_connected(dev_priv, port);
4445 }
4446
4447 static enum drm_connector_status
4448 ironlake_dp_detect(struct intel_dp *intel_dp)
4449 {
4450         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4451         struct drm_i915_private *dev_priv = dev->dev_private;
4452         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4453
4454         if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4455                 return connector_status_disconnected;
4456
4457         return intel_dp_detect_dpcd(intel_dp);
4458 }
4459
4460 static enum drm_connector_status
4461 g4x_dp_detect(struct intel_dp *intel_dp)
4462 {
4463         struct drm_device *dev = intel_dp_to_dev(intel_dp);
4464         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4465
4466         /* Can't disconnect eDP, but you can close the lid... */
4467         if (is_edp(intel_dp)) {
4468                 enum drm_connector_status status;
4469
4470                 status = intel_panel_detect(dev);
4471                 if (status == connector_status_unknown)
4472                         status = connector_status_connected;
4473                 return status;
4474         }
4475
4476         if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4477                 return connector_status_disconnected;
4478
4479         return intel_dp_detect_dpcd(intel_dp);
4480 }
4481
4482 static struct edid *
4483 intel_dp_get_edid(struct intel_dp *intel_dp)
4484 {
4485         struct intel_connector *intel_connector = intel_dp->attached_connector;
4486
4487         /* use cached edid if we have one */
4488         if (intel_connector->edid) {
4489                 /* invalid edid */
4490                 if (IS_ERR(intel_connector->edid))
4491                         return NULL;
4492
4493                 return drm_edid_duplicate(intel_connector->edid);
4494         } else
4495                 return drm_get_edid(&intel_connector->base,
4496                                     &intel_dp->aux.ddc);
4497 }
4498
4499 static void
4500 intel_dp_set_edid(struct intel_dp *intel_dp)
4501 {
4502         struct intel_connector *intel_connector = intel_dp->attached_connector;
4503         struct edid *edid;
4504
4505         edid = intel_dp_get_edid(intel_dp);
4506         intel_connector->detect_edid = edid;
4507
4508         if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4509                 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4510         else
4511                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4512 }
4513
4514 static void
4515 intel_dp_unset_edid(struct intel_dp *intel_dp)
4516 {
4517         struct intel_connector *intel_connector = intel_dp->attached_connector;
4518
4519         kfree(intel_connector->detect_edid);
4520         intel_connector->detect_edid = NULL;
4521
4522         intel_dp->has_audio = false;
4523 }
4524
4525 static enum intel_display_power_domain
4526 intel_dp_power_get(struct intel_dp *dp)
4527 {
4528         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4529         enum intel_display_power_domain power_domain;
4530
4531         power_domain = intel_display_port_power_domain(encoder);
4532         intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4533
4534         return power_domain;
4535 }
4536
4537 static void
4538 intel_dp_power_put(struct intel_dp *dp,
4539                    enum intel_display_power_domain power_domain)
4540 {
4541         struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4542         intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4543 }
4544
/*
 * .detect() hook: determine whether this DP connector has a sink attached.
 * MST-mode connectors always report disconnected (monitors appear via MST
 * connectors instead). Otherwise probe via the platform-appropriate path,
 * read OUI, probe MST, cache the EDID, and service any pending sink IRQs.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Invalidate any previously cached EDID before re-probing. */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* AUX/DDC access below needs the port power domain held. */
	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4616
4617 static void
4618 intel_dp_force(struct drm_connector *connector)
4619 {
4620         struct intel_dp *intel_dp = intel_attached_dp(connector);
4621         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4622         enum intel_display_power_domain power_domain;
4623
4624         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4625                       connector->base.id, connector->name);
4626         intel_dp_unset_edid(intel_dp);
4627
4628         if (connector->status != connector_status_connected)
4629                 return;
4630
4631         power_domain = intel_dp_power_get(intel_dp);
4632
4633         intel_dp_set_edid(intel_dp);
4634
4635         intel_dp_power_put(intel_dp, power_domain);
4636
4637         if (intel_encoder->type != INTEL_OUTPUT_EDP)
4638                 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4639 }
4640
4641 static int intel_dp_get_modes(struct drm_connector *connector)
4642 {
4643         struct intel_connector *intel_connector = to_intel_connector(connector);
4644         struct edid *edid;
4645
4646         edid = intel_connector->detect_edid;
4647         if (edid) {
4648                 int ret = intel_connector_update_modes(connector, edid);
4649                 if (ret)
4650                         return ret;
4651         }
4652
4653         /* if eDP has no EDID, fall back to fixed mode */
4654         if (is_edp(intel_attached_dp(connector)) &&
4655             intel_connector->panel.fixed_mode) {
4656                 struct drm_display_mode *mode;
4657
4658                 mode = drm_mode_duplicate(connector->dev,
4659                                           intel_connector->panel.fixed_mode);
4660                 if (mode) {
4661                         drm_mode_probed_add(connector, mode);
4662                         return 1;
4663                 }
4664         }
4665
4666         return 0;
4667 }
4668
4669 static bool
4670 intel_dp_detect_audio(struct drm_connector *connector)
4671 {
4672         bool has_audio = false;
4673         struct edid *edid;
4674
4675         edid = to_intel_connector(connector)->detect_edid;
4676         if (edid)
4677                 has_audio = drm_detect_monitor_audio(edid);
4678
4679         return has_audio;
4680 }
4681
/*
 * Handle writes to the DP connector properties (force audio, broadcast
 * RGB range, eDP scaling mode). Returns 0 on success or a negative errno.
 * When the new value actually changes state, the active mode is restored
 * so the change takes effect immediately.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the value on the DRM object first; the checks below only
	 * decide whether a modeset is needed to apply it. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO defers to the sink's EDID audio capability. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset if the effective range didn't change. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Redo the modeset on the active crtc to apply the new setting. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4769
/* Free everything hanging off the connector, then the connector itself. */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* edid may hold an ERR_PTR from a failed EDID read, which must not
	 * be passed to kfree(). */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4788
/*
 * Encoder teardown: unregister the AUX channel and MST state, make sure
 * eDP VDD is really off, then release the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4814
/* On suspend, force eDP VDD off so it isn't left on across the suspend. */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4831
/*
 * Reconcile our VDD state tracking with hardware at boot/resume.
 * Caller must hold pps_mutex (asserted below).
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4856
/*
 * drm_encoder_funcs .reset hook: re-sync eDP power sequencer state with
 * whatever the BIOS left behind. Non-eDP encoders need no sanitizing.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4879
/* DRM connector vtable for DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4891
/* Probe helpers used by the DRM core for mode enumeration/validation. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4897
/* DRM encoder vtable for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4902
/*
 * Hotplug interrupt handler for a DP digital port. A long pulse means a
 * plug/unplug (re-read DPCD, possibly enter MST mode); a short pulse means
 * the sink wants attention (link status, MST sideband, test requests).
 * Returns IRQ_HANDLED when the pulse was serviced, IRQ_NONE otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* Hold the port's power domain for the AUX/register traffic below. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		/* Sink isn't MST capable: treat it as single-stream and
		 * check link status immediately; mst_fail also tears down
		 * any stale MST state. */
		if (!intel_dp_probe_mst(intel_dp)) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		/* Short pulse on a single-stream sink: service link status. */
		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4982
4983 /* Return which DP Port should be selected for Transcoder DP control */
4984 int
4985 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4986 {
4987         struct drm_device *dev = crtc->dev;
4988         struct intel_encoder *intel_encoder;
4989         struct intel_dp *intel_dp;
4990
4991         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4992                 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4993
4994                 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4995                     intel_encoder->type == INTEL_OUTPUT_EDP)
4996                         return intel_dp->output_reg;
4997         }
4998
4999         return -1;
5000 }
5001
5002 /* check the VBT to see whether the eDP is on another port */
5003 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5004 {
5005         struct drm_i915_private *dev_priv = dev->dev_private;
5006         union child_device_config *p_child;
5007         int i;
5008         static const short port_mapping[] = {
5009                 [PORT_B] = DVO_PORT_DPB,
5010                 [PORT_C] = DVO_PORT_DPC,
5011                 [PORT_D] = DVO_PORT_DPD,
5012                 [PORT_E] = DVO_PORT_DPE,
5013         };
5014
5015         /*
5016          * eDP not supported on g4x. so bail out early just
5017          * for a bit extra safety in case the VBT is bonkers.
5018          */
5019         if (INTEL_INFO(dev)->gen < 5)
5020                 return false;
5021
5022         if (port == PORT_A)
5023                 return true;
5024
5025         if (!dev_priv->vbt.child_dev_num)
5026                 return false;
5027
5028         for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5029                 p_child = dev_priv->vbt.child_dev + i;
5030
5031                 if (p_child->common.dvo_port == port_mapping[port] &&
5032                     (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5033                     (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5034                         return true;
5035         }
5036         return false;
5037 }
5038
/* Attach i915 connector properties (audio, broadcast RGB, eDP scaling). */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		/* eDP panels additionally get a scaling mode property,
		 * defaulting to aspect-preserving panel fitting. */
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5057
/* Seed the PPS bookkeeping timestamps with "now" so the first panel
 * power transitions honour the required minimum delays. */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5064
/*
 * Compute the final eDP panel power sequencing delays: read the values
 * currently programmed in the PPS registers, compare against the VBT and
 * the eDP spec limits, and store the maxima in intel_dp->pps_delays plus
 * the derived per-transition delay fields. Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the PPS register block for this platform. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* BXT keeps the power cycle delay in PP_CONTROL, in units of 100ms
	 * and offset by one; convert to the common 100us units. */
	if (IS_BROXTON(dev)) {
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from hw units (100us) to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5187
/*
 * Program the previously computed PPS delays (intel_dp->pps_delays) into
 * the hardware power sequencer registers, including the clock divisor and
 * the port select bits where the platform has them. Caller must hold
 * pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the PPS register block for this platform. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT: power cycle delay lives in PP_CONTROL, in 100ms units. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5275
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* A rate matching the panel's downclock mode selects the low RR. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* gen8+ (except CHV) switch RR via the M/N divider sets; earlier
	 * gens (>6) toggle the RR mode switch bit in PIPECONF. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5379
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	/* Only one eDP port can own DRRS at a time. */
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5412
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Restores the high refresh rate if the low one is active, releases
 * drrs.dp ownership and cancels any pending downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Go back to the panel's fixed (high) refresh rate if downclocked. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/* Outside the mutex: the work itself takes drrs.mutex. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5445
/*
 * Delayed work scheduled from intel_edp_drrs_flush(): after the idleness
 * timeout has elapsed with no frontbuffer activity, switch the panel to
 * its downclock (low refresh rate) mode.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled while this work was pending. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	/* Screen stayed idle for the full timeout: drop to the low rate. */
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5475
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* New activity restarts the idleness clock: drop pending downclock. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS not currently enabled on any panel. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only bits belonging to the DRRS panel's pipe are relevant. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5518
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Restart the idleness timer from scratch. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS not currently enabled on any panel. */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Clear the just-flushed bits (DRRS pipe only) from the busy mask. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5570
5571 /**
5572  * DOC: Display Refresh Rate Switching (DRRS)
5573  *
5574  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5576  * dynamically, based on the usage scenario. This feature is applicable
5577  * for internal panels.
5578  *
5579  * Indication that the panel supports DRRS is given by the panel EDID, which
5580  * would list multiple refresh rates for one resolution.
5581  *
5582  * DRRS is of 2 types - static and seamless.
5583  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5584  * (may appear as a blink on screen) and is used in dock-undock scenario.
5585  * Seamless DRRS involves changing RR without any visual effect to the user
5586  * and can be used during normal system usage. This is done by programming
5587  * certain registers.
5588  *
5589  * Support for static/seamless DRRS may be indicated in the VBT based on
5590  * inputs from the panel spec.
5591  *
5592  * DRRS saves power by switching to low RR based on usage scenarios.
5593  *
5594  * eDP DRRS:-
5595  *        The implementation is based on frontbuffer tracking implementation.
5596  * When there is a disturbance on the screen triggered by user activity or a
5597  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5598  * When there is no movement on screen, after a timeout of 1 second, a switch
5599  * to low RR is made.
5600  *        For integration with frontbuffer tracking code,
5601  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5602  *
5603  * DRRS can be further extended to support other internal panels and also
5604  * the scenario of video playback wherein RR is set based on the rate
5605  * requested by userspace.
5606  */
5607
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is  called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/*
	 * Work and mutex are set up unconditionally, before any of the
	 * support checks below, so the DRRS entry points can always take
	 * drrs.mutex even when DRRS turns out to be unsupported.
	 */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	/* Seamless DRRS must be advertised by the VBT; static DRRS won't do. */
	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* Look for a lower-refresh-rate mode matching the fixed mode. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	/* Panel starts out at the fixed-mode (high) refresh rate. */
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5657
/*
 * eDP-specific part of connector init: sanitize VDD state, cache DPCD and
 * EDID, select the panel's fixed (and optional downclock) mode, and set up
 * the panel/backlight. Returns false only when DPCD reads fail, i.e. the
 * "panel" is presumed to be a ghost device; returns true for non-eDP.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	/* None of this applies to external DP; report success. */
	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID read succeeded but contained no usable modes. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Cache the EDID (or the ERR_PTR failure sentinel) on the connector. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5764
/*
 * Initialize the DP/eDP connector for a digital port: select per-platform
 * AUX vfuncs, register the DRM connector, set up the HPD pin, the panel
 * power sequencer (eDP), AUX channel and MST, then run the eDP-specific
 * init. Returns false (after unwinding connector registration) when the
 * port is invalid or the eDP panel turns out to be a ghost.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	if (HAS_DDI(dev))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A0/A1 steppings use the port A HPD pin for port B. */
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	/* Power sequencer must be configured before any AUX transfers. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* Ghost eDP panel: unwind everything registered above. */
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled do to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}
5916
/*
 * Allocate and register a DP digital port/encoder on @output_reg for
 * @port, wiring up the per-platform (CHV/VLV/other) encoder hooks, then
 * initialize its connector. On failure everything allocated here is
 * freed again; the function reports nothing to the caller.
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Enable/disable sequencing differs per platform generation. */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* CHV port D is tied to pipe C; other ports use pipes A/B. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}
5993
5994 void intel_dp_mst_suspend(struct drm_device *dev)
5995 {
5996         struct drm_i915_private *dev_priv = dev->dev_private;
5997         int i;
5998
5999         /* disable MST */
6000         for (i = 0; i < I915_MAX_PORTS; i++) {
6001                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6002                 if (!intel_dig_port)
6003                         continue;
6004
6005                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6006                         if (!intel_dig_port->dp.can_mst)
6007                                 continue;
6008                         if (intel_dig_port->dp.is_mst)
6009                                 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6010                 }
6011         }
6012 }
6013
6014 void intel_dp_mst_resume(struct drm_device *dev)
6015 {
6016         struct drm_i915_private *dev_priv = dev->dev_private;
6017         int i;
6018
6019         for (i = 0; i < I915_MAX_PORTS; i++) {
6020                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6021                 if (!intel_dig_port)
6022                         continue;
6023                 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6024                         int ret;
6025
6026                         if (!intel_dig_port->dp.can_mst)
6027                                 continue;
6028
6029                         ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6030                         if (ret != 0) {
6031                                 intel_dp_check_mst_status(&intel_dig_port->dp);
6032                         }
6033                 }
6034         }
6035 }